1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2020 Marvell International Ltd.
4  */
5 
6 #ifndef __CVMX_LMCX_DEFS_H__
7 #define __CVMX_LMCX_DEFS_H__
8 
9 #define CVMX_LMCX_BANK_CONFLICT1(offs)			\
10 	((0x000360ull) + ((offs) & 3) * 0x1000000ull)
11 #define CVMX_LMCX_BANK_CONFLICT2(offs)			\
12 	((0x000368ull) + ((offs) & 3) * 0x1000000ull)
13 #define CVMX_LMCX_BIST_RESULT(offs)			\
14 	((0x0000F8ull) + ((offs) & 1) * 0x60000000ull)
15 #define CVMX_LMCX_CHAR_CTL(offs)			\
16 	((0x000220ull) + ((offs) & 3) * 0x1000000ull)
17 #define CVMX_LMCX_CHAR_DQ_ERR_COUNT(offs)		\
18 	((0x000040ull) + ((offs) & 3) * 0x1000000ull)
19 #define CVMX_LMCX_CHAR_MASK0(offs)			\
20 	((0x000228ull) + ((offs) & 3) * 0x1000000ull)
21 #define CVMX_LMCX_CHAR_MASK1(offs)			\
22 	((0x000230ull) + ((offs) & 3) * 0x1000000ull)
23 #define CVMX_LMCX_CHAR_MASK2(offs)			\
24 	((0x000238ull) + ((offs) & 3) * 0x1000000ull)
25 #define CVMX_LMCX_CHAR_MASK3(offs)			\
26 	((0x000240ull) + ((offs) & 3) * 0x1000000ull)
27 #define CVMX_LMCX_CHAR_MASK4(offs)			\
28 	((0x000318ull) + ((offs) & 3) * 0x1000000ull)
29 #define CVMX_LMCX_COMP_CTL(offs)			\
30 	((0x000028ull) + ((offs) & 1) * 0x60000000ull)
31 #define CVMX_LMCX_COMP_CTL2(offs)			\
32 	((0x0001B8ull) + ((offs) & 3) * 0x1000000ull)
33 #define CVMX_LMCX_CONFIG(offs)				\
34 	((0x000188ull) + ((offs) & 3) * 0x1000000ull)
35 #define CVMX_LMCX_CONTROL(offs)				\
36 	((0x000190ull) + ((offs) & 3) * 0x1000000ull)
37 #define CVMX_LMCX_CTL(offs)				\
38 	((0x000010ull) + ((offs) & 1) * 0x60000000ull)
39 #define CVMX_LMCX_CTL1(offs)				\
40 	((0x000090ull) + ((offs) & 1) * 0x60000000ull)
41 #define CVMX_LMCX_DBTRAIN_CTL(offs)			\
42 	((0x0003F8ull) + ((offs) & 3) * 0x1000000ull)
43 #define CVMX_LMCX_DCLK_CNT(offs)			\
44 	((0x0001E0ull) + ((offs) & 3) * 0x1000000ull)
45 #define CVMX_LMCX_DCLK_CNT_HI(offs)			\
46 	((0x000070ull) + ((offs) & 1) * 0x60000000ull)
47 #define CVMX_LMCX_DCLK_CNT_LO(offs)			\
48 	((0x000068ull) + ((offs) & 1) * 0x60000000ull)
49 #define CVMX_LMCX_DCLK_CTL(offs)			\
50 	((0x0000B8ull) + ((offs) & 1) * 0x60000000ull)
51 #define CVMX_LMCX_DDR2_CTL(offs)			\
52 	((0x000018ull) + ((offs) & 1) * 0x60000000ull)
53 #define CVMX_LMCX_DDR4_DIMM_CTL(offs)			\
54 	((0x0003F0ull) + ((offs) & 3) * 0x1000000ull)
55 #define CVMX_LMCX_DDR_PLL_CTL(offs)			\
56 	((0x000258ull) + ((offs) & 3) * 0x1000000ull)
57 #define CVMX_LMCX_DELAY_CFG(offs)			\
58 	((0x000088ull) + ((offs) & 1) * 0x60000000ull)
59 #define CVMX_LMCX_DIMMX_DDR4_PARAMS0(offs, id)				\
60 	((0x0000D0ull) + (((offs) & 1) + ((id) & 3) * 0x200000ull) * 8)
61 #define CVMX_LMCX_DIMMX_DDR4_PARAMS1(offs, id)				\
62 	((0x000140ull) + (((offs) & 1) + ((id) & 3) * 0x200000ull) * 8)
63 #define CVMX_LMCX_DIMMX_PARAMS(offs, id)				\
64 	((0x000270ull) + (((offs) & 1) + ((id) & 3) * 0x200000ull) * 8)
65 #define CVMX_LMCX_DIMM_CTL(offs)			\
66 	((0x000310ull) + ((offs) & 3) * 0x1000000ull)
67 #define CVMX_LMCX_DLL_CTL(offs)				\
68 	((0x0000C0ull) + ((offs) & 1) * 0x60000000ull)
69 #define CVMX_LMCX_DLL_CTL2(offs)			\
70 	((0x0001C8ull) + ((offs) & 3) * 0x1000000ull)
71 #define CVMX_LMCX_DLL_CTL3(offs)			\
72 	((0x000218ull) + ((offs) & 3) * 0x1000000ull)
73 #define CVMX_LMCX_ECC_PARITY_TEST(offs)			\
74 	((0x000108ull) + ((offs) & 3) * 0x1000000ull)
75 #define CVMX_LMCX_EXT_CONFIG(offs)			\
76 	((0x000030ull) + ((offs) & 3) * 0x1000000ull)
77 #define CVMX_LMCX_EXT_CONFIG2(offs)			\
78 	((0x000090ull) + ((offs) & 3) * 0x1000000ull)
79 #define CVMX_LMCX_GENERAL_PURPOSE0(offs)		\
80 	((0x000340ull) + ((offs) & 3) * 0x1000000ull)
81 #define CVMX_LMCX_GENERAL_PURPOSE1(offs)		\
82 	((0x000348ull) + ((offs) & 3) * 0x1000000ull)
83 #define CVMX_LMCX_GENERAL_PURPOSE2(offs)		\
84 	((0x000350ull) + ((offs) & 3) * 0x1000000ull)
85 #define CVMX_LMCX_IFB_CNT(offs)				\
86 	((0x0001D0ull) + ((offs) & 3) * 0x1000000ull)
87 #define CVMX_LMCX_IFB_CNT_HI(offs)			\
88 	((0x000050ull) + ((offs) & 1) * 0x60000000ull)
89 #define CVMX_LMCX_IFB_CNT_LO(offs)			\
90 	((0x000048ull) + ((offs) & 1) * 0x60000000ull)
91 #define CVMX_LMCX_INT(offs)				\
92 	((0x0001F0ull) + ((offs) & 3) * 0x1000000ull)
93 #define CVMX_LMCX_INT_EN(offs)				\
94 	((0x0001E8ull) + ((offs) & 3) * 0x1000000ull)
95 #define CVMX_LMCX_LANEX_CRC_SWIZ(offs, id)				\
96 	((0x000380ull) + (((offs) & 15) + ((id) & 3) * 0x200000ull) * 8)
97 #define CVMX_LMCX_MEM_CFG0(offs)			\
98 	((0x000000ull) + ((offs) & 1) * 0x60000000ull)
99 #define CVMX_LMCX_MEM_CFG1(offs)			\
100 	((0x000008ull) + ((offs) & 1) * 0x60000000ull)
101 #define CVMX_LMCX_MODEREG_PARAMS0(offs)			\
102 	((0x0001A8ull) + ((offs) & 3) * 0x1000000ull)
103 #define CVMX_LMCX_MODEREG_PARAMS1(offs)			\
104 	((0x000260ull) + ((offs) & 3) * 0x1000000ull)
105 #define CVMX_LMCX_MODEREG_PARAMS2(offs)			\
106 	((0x000050ull) + ((offs) & 3) * 0x1000000ull)
107 #define CVMX_LMCX_MODEREG_PARAMS3(offs)			\
108 	((0x000058ull) + ((offs) & 3) * 0x1000000ull)
109 #define CVMX_LMCX_MPR_DATA0(offs)			\
110 	((0x000070ull) + ((offs) & 3) * 0x1000000ull)
111 #define CVMX_LMCX_MPR_DATA1(offs)			\
112 	((0x000078ull) + ((offs) & 3) * 0x1000000ull)
113 #define CVMX_LMCX_MPR_DATA2(offs)			\
114 	((0x000080ull) + ((offs) & 3) * 0x1000000ull)
115 #define CVMX_LMCX_MR_MPR_CTL(offs)			\
116 	((0x000068ull) + ((offs) & 3) * 0x1000000ull)
117 #define CVMX_LMCX_NS_CTL(offs)				\
118 	((0x000178ull) + ((offs) & 3) * 0x1000000ull)
119 
120 static inline uint64_t CVMX_LMCX_NXM(unsigned long offs)
121 {
122 	switch (cvmx_get_octeon_family()) {
123 	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
124 	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
125 	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
126 	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
127 	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
128 		return (0x0000C8ull) + (offs) * 0x60000000ull;
129 	case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
130 	case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
131 		return (0x0000C8ull) + (offs) * 0x1000000ull;
132 	case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
133 		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
134 			return (0x0000C8ull) + (offs) * 0x1000000ull;
135 		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
136 			return (0x0000C8ull) + (offs) * 0x1000000ull;
137 	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
138 		return (0x0000C8ull) + (offs) * 0x1000000ull;
139 	}
140 	return (0x0000C8ull) + (offs) * 0x1000000ull;
141 }
142 
143 #define CVMX_LMCX_NXM_FADR(offs)			\
144 	((0x000028ull) + ((offs) & 3) * 0x1000000ull)
145 #define CVMX_LMCX_OPS_CNT(offs)				\
146 	((0x0001D8ull) + ((offs) & 3) * 0x1000000ull)
147 #define CVMX_LMCX_OPS_CNT_HI(offs)			\
148 	((0x000060ull) + ((offs) & 1) * 0x60000000ull)
149 #define CVMX_LMCX_OPS_CNT_LO(offs)			\
150 	((0x000058ull) + ((offs) & 1) * 0x60000000ull)
151 #define CVMX_LMCX_PHY_CTL(offs)				\
152 	((0x000210ull) + ((offs) & 3) * 0x1000000ull)
153 #define CVMX_LMCX_PHY_CTL2(offs)			\
154 	((0x000250ull) + ((offs) & 3) * 0x1000000ull)
155 #define CVMX_LMCX_PLL_BWCTL(offs)		\
156 	((0x000040ull))
157 #define CVMX_LMCX_PLL_CTL(offs)				\
158 	((0x0000A8ull) + ((offs) & 1) * 0x60000000ull)
159 #define CVMX_LMCX_PLL_STATUS(offs)			\
160 	((0x0000B0ull) + ((offs) & 1) * 0x60000000ull)
161 #define CVMX_LMCX_PPR_CTL(offs)				\
162 	((0x0003E0ull) + ((offs) & 3) * 0x1000000ull)
163 #define CVMX_LMCX_READ_LEVEL_CTL(offs)			\
164 	((0x000140ull) + ((offs) & 1) * 0x60000000ull)
165 #define CVMX_LMCX_READ_LEVEL_DBG(offs)			\
166 	((0x000148ull) + ((offs) & 1) * 0x60000000ull)
167 #define CVMX_LMCX_READ_LEVEL_RANKX(offs, id)				\
168 	((0x000100ull) + (((offs) & 3) + ((id) & 1) * 0xC000000ull) * 8)
169 #define CVMX_LMCX_REF_STATUS(offs)			\
170 	((0x0000A0ull) + ((offs) & 3) * 0x1000000ull)
171 #define CVMX_LMCX_RESET_CTL(offs)			\
172 	((0x000180ull) + ((offs) & 3) * 0x1000000ull)
173 #define CVMX_LMCX_RETRY_CONFIG(offs)			\
174 	((0x000110ull) + ((offs) & 3) * 0x1000000ull)
175 #define CVMX_LMCX_RETRY_STATUS(offs)			\
176 	((0x000118ull) + ((offs) & 3) * 0x1000000ull)
177 #define CVMX_LMCX_RLEVEL_CTL(offs)			\
178 	((0x0002A0ull) + ((offs) & 3) * 0x1000000ull)
179 #define CVMX_LMCX_RLEVEL_DBG(offs)			\
180 	((0x0002A8ull) + ((offs) & 3) * 0x1000000ull)
181 #define CVMX_LMCX_RLEVEL_RANKX(offs, id)				\
182 	((0x000280ull) + (((offs) & 3) + ((id) & 3) * 0x200000ull) * 8)
183 #define CVMX_LMCX_RODT_COMP_CTL(offs)			\
184 	((0x0000A0ull) + ((offs) & 1) * 0x60000000ull)
185 #define CVMX_LMCX_RODT_CTL(offs)			\
186 	((0x000078ull) + ((offs) & 1) * 0x60000000ull)
187 #define CVMX_LMCX_RODT_MASK(offs)			\
188 	((0x000268ull) + ((offs) & 3) * 0x1000000ull)
189 #define CVMX_LMCX_SCRAMBLED_FADR(offs)			\
190 	((0x000330ull) + ((offs) & 3) * 0x1000000ull)
191 #define CVMX_LMCX_SCRAMBLE_CFG0(offs)			\
192 	((0x000320ull) + ((offs) & 3) * 0x1000000ull)
193 #define CVMX_LMCX_SCRAMBLE_CFG1(offs)			\
194 	((0x000328ull) + ((offs) & 3) * 0x1000000ull)
195 #define CVMX_LMCX_SCRAMBLE_CFG2(offs)			\
196 	((0x000338ull) + ((offs) & 3) * 0x1000000ull)
197 #define CVMX_LMCX_SEQ_CTL(offs)				\
198 	((0x000048ull) + ((offs) & 3) * 0x1000000ull)
199 #define CVMX_LMCX_SLOT_CTL0(offs)			\
200 	((0x0001F8ull) + ((offs) & 3) * 0x1000000ull)
201 #define CVMX_LMCX_SLOT_CTL1(offs)			\
202 	((0x000200ull) + ((offs) & 3) * 0x1000000ull)
203 #define CVMX_LMCX_SLOT_CTL2(offs)			\
204 	((0x000208ull) + ((offs) & 3) * 0x1000000ull)
205 #define CVMX_LMCX_SLOT_CTL3(offs)			\
206 	((0x000248ull) + ((offs) & 3) * 0x1000000ull)
207 #define CVMX_LMCX_TIMING_PARAMS0(offs)			\
208 	((0x000198ull) + ((offs) & 3) * 0x1000000ull)
209 #define CVMX_LMCX_TIMING_PARAMS1(offs)			\
210 	((0x0001A0ull) + ((offs) & 3) * 0x1000000ull)
211 #define CVMX_LMCX_TIMING_PARAMS2(offs)			\
212 	((0x000060ull) + ((offs) & 3) * 0x1000000ull)
213 #define CVMX_LMCX_TRO_CTL(offs)				\
214 	((0x000248ull) + ((offs) & 3) * 0x1000000ull)
215 #define CVMX_LMCX_TRO_STAT(offs)			\
216 	((0x000250ull) + ((offs) & 3) * 0x1000000ull)
217 #define CVMX_LMCX_WLEVEL_CTL(offs)			\
218 	((0x000300ull) + ((offs) & 3) * 0x1000000ull)
219 #define CVMX_LMCX_WLEVEL_DBG(offs)			\
220 	((0x000308ull) + ((offs) & 3) * 0x1000000ull)
221 
222 static inline uint64_t CVMX_LMCX_WLEVEL_RANKX(unsigned long offs,
223 					      unsigned long id)
224 {
225 	switch (cvmx_get_octeon_family()) {
226 	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
227 		return (0x0002C0ull) + ((offs) + (id) * 0x200000ull) * 8;
228 	case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
229 	case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
230 		return (0x0002C0ull) + ((offs) + (id) * 0x200000ull) * 8;
231 	case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
232 		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
233 			return (0x0002C0ull) + ((offs) +
234 						(id) * 0x200000ull) * 8;
235 		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
236 			return (0x0002C0ull) + ((offs) +
237 						(id) * 0x200000ull) * 8;
238 
239 	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
240 	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
241 		return (0x0002B0ull) + ((offs) + (id) * 0x0ull) * 8;
242 	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
243 	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
244 		return (0x0002B0ull) + ((offs) + (id) * 0x200000ull) * 8;
245 	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
246 		return (0x0002B0ull) + ((offs) + (id) * 0x200000ull) * 8;
247 	}
248 	return (0x0002C0ull) + ((offs) + (id) * 0x200000ull) * 8;
249 }
250 
251 #define CVMX_LMCX_WODT_CTL0(offs)			\
252 	((0x000030ull) + ((offs) & 1) * 0x60000000ull)
253 #define CVMX_LMCX_WODT_CTL1(offs)			\
254 	((0x000080ull) + ((offs) & 1) * 0x60000000ull)
255 #define CVMX_LMCX_WODT_MASK(offs)			\
256 	((0x0001B0ull) + ((offs) & 3) * 0x1000000ull)
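
/*
 * Illustrative sketch, not part of the original register definitions:
 * each CVMX_LMCX_* macro above only computes the per-interface register
 * address/offset that the surrounding driver code passes to its CSR
 * accessors, with the (offs) argument selecting the LMC interface and a
 * per-chip stride (0x1000000 or 0x60000000) encoded in the macro.  The
 * helper name below is hypothetical.
 */
static inline uint64_t example_lmcx_config_offset(unsigned long if_num)
{
	/* e.g. if_num = 1 yields 0x000188 + 1 * 0x1000000 = 0x1000188 */
	return CVMX_LMCX_CONFIG(if_num);
}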
257 
258 /**
259  * cvmx_lmc#_char_ctl
260  *
261  * This register provides an assortment of various control fields needed
262  * to characterize the DDR3 interface.
263  */
264 union cvmx_lmcx_char_ctl {
265 	u64 u64;
266 	struct cvmx_lmcx_char_ctl_s {
267 		uint64_t reserved_54_63:10;
268 		uint64_t dq_char_byte_check:1;
269 		uint64_t dq_char_check_lock:1;
270 		uint64_t dq_char_check_enable:1;
271 		uint64_t dq_char_bit_sel:3;
272 		uint64_t dq_char_byte_sel:4;
273 		uint64_t dr:1;
274 		uint64_t skew_on:1;
275 		uint64_t en:1;
276 		uint64_t sel:1;
277 		uint64_t prog:8;
278 		uint64_t prbs:32;
279 	} s;
280 	struct cvmx_lmcx_char_ctl_cn61xx {
281 		uint64_t reserved_44_63:20;
282 		uint64_t dr:1;
283 		uint64_t skew_on:1;
284 		uint64_t en:1;
285 		uint64_t sel:1;
286 		uint64_t prog:8;
287 		uint64_t prbs:32;
288 	} cn61xx;
289 	struct cvmx_lmcx_char_ctl_cn63xx {
290 		uint64_t reserved_42_63:22;
291 		uint64_t en:1;
292 		uint64_t sel:1;
293 		uint64_t prog:8;
294 		uint64_t prbs:32;
295 	} cn63xx;
296 	struct cvmx_lmcx_char_ctl_cn63xx cn63xxp1;
297 	struct cvmx_lmcx_char_ctl_cn61xx cn66xx;
298 	struct cvmx_lmcx_char_ctl_cn61xx cn68xx;
299 	struct cvmx_lmcx_char_ctl_cn63xx cn68xxp1;
300 	struct cvmx_lmcx_char_ctl_cn70xx {
301 		uint64_t reserved_53_63:11;
302 		uint64_t dq_char_check_lock:1;
303 		uint64_t dq_char_check_enable:1;
304 		uint64_t dq_char_bit_sel:3;
305 		uint64_t dq_char_byte_sel:4;
306 		uint64_t dr:1;
307 		uint64_t skew_on:1;
308 		uint64_t en:1;
309 		uint64_t sel:1;
310 		uint64_t prog:8;
311 		uint64_t prbs:32;
312 	} cn70xx;
313 	struct cvmx_lmcx_char_ctl_cn70xx cn70xxp1;
314 	struct cvmx_lmcx_char_ctl_s cn73xx;
315 	struct cvmx_lmcx_char_ctl_s cn78xx;
316 	struct cvmx_lmcx_char_ctl_s cn78xxp1;
317 	struct cvmx_lmcx_char_ctl_cn61xx cnf71xx;
318 	struct cvmx_lmcx_char_ctl_s cnf75xx;
319 };
320 
321 /**
322  * cvmx_lmc#_comp_ctl2
323  *
324  * LMC_COMP_CTL2 = LMC Compensation control
325  *
326  */
327 union cvmx_lmcx_comp_ctl2 {
328 	u64 u64;
329 	struct cvmx_lmcx_comp_ctl2_s {
330 		uint64_t reserved_51_63:13;
331 		uint64_t rclk_char_mode:1;
332 		uint64_t reserved_40_49:10;
333 		uint64_t ptune_offset:4;
334 		uint64_t reserved_12_35:24;
335 		uint64_t cmd_ctl:4;
336 		uint64_t ck_ctl:4;
337 		uint64_t dqx_ctl:4;
338 	} s;
339 	struct cvmx_lmcx_comp_ctl2_cn61xx {
340 		uint64_t reserved_34_63:30;
341 		uint64_t ddr__ptune:4;
342 		uint64_t ddr__ntune:4;
343 		uint64_t m180:1;
344 		uint64_t byp:1;
345 		uint64_t ptune:4;
346 		uint64_t ntune:4;
347 		uint64_t rodt_ctl:4;
348 		uint64_t cmd_ctl:4;
349 		uint64_t ck_ctl:4;
350 		uint64_t dqx_ctl:4;
351 	} cn61xx;
352 	struct cvmx_lmcx_comp_ctl2_cn61xx cn63xx;
353 	struct cvmx_lmcx_comp_ctl2_cn61xx cn63xxp1;
354 	struct cvmx_lmcx_comp_ctl2_cn61xx cn66xx;
355 	struct cvmx_lmcx_comp_ctl2_cn61xx cn68xx;
356 	struct cvmx_lmcx_comp_ctl2_cn61xx cn68xxp1;
357 	struct cvmx_lmcx_comp_ctl2_cn70xx {
358 		uint64_t reserved_51_63:13;
359 		uint64_t rclk_char_mode:1;
360 		uint64_t ddr__ptune:5;
361 		uint64_t ddr__ntune:5;
362 		uint64_t ptune_offset:4;
363 		uint64_t ntune_offset:4;
364 		uint64_t m180:1;
365 		uint64_t byp:1;
366 		uint64_t ptune:5;
367 		uint64_t ntune:5;
368 		uint64_t rodt_ctl:4;
369 		uint64_t control_ctl:4;
370 		uint64_t cmd_ctl:4;
371 		uint64_t ck_ctl:4;
372 		uint64_t dqx_ctl:4;
373 	} cn70xx;
374 	struct cvmx_lmcx_comp_ctl2_cn70xx cn70xxp1;
375 	struct cvmx_lmcx_comp_ctl2_cn70xx cn73xx;
376 	struct cvmx_lmcx_comp_ctl2_cn70xx cn78xx;
377 	struct cvmx_lmcx_comp_ctl2_cn70xx cn78xxp1;
378 	struct cvmx_lmcx_comp_ctl2_cn61xx cnf71xx;
379 	struct cvmx_lmcx_comp_ctl2_cn70xx cnf75xx;
380 };
381 
382 /**
383  * cvmx_lmc#_config
384  *
385  * This register controls certain parameters required for memory configuration.
386  * Note the following:
387  * * Priority order for hardware write operations to
388  * LMC()_CONFIG/LMC()_FADR/LMC()_ECC_SYND: DED error > SEC error.
389  * * The self-refresh entry sequence(s) power the DLL up/down (depending on
390  * LMC()_MODEREG_PARAMS0[DLL]) when LMC()_CONFIG[SREF_WITH_DLL] is set.
391  * * Prior to the self-refresh exit sequence, LMC()_MODEREG_PARAMS0 should
392  * be reprogrammed
393  * (if needed) to the appropriate values.
394  *
395  * See LMC initialization sequence for the LMC bringup sequence.
396  */
397 union cvmx_lmcx_config {
398 	u64 u64;
399 	struct cvmx_lmcx_config_s {
400 		uint64_t lrdimm_ena:1;
401 		uint64_t bg2_enable:1;
402 		uint64_t mode_x4dev:1;
403 		uint64_t mode32b:1;
404 		uint64_t scrz:1;
405 		uint64_t early_unload_d1_r1:1;
406 		uint64_t early_unload_d1_r0:1;
407 		uint64_t early_unload_d0_r1:1;
408 		uint64_t early_unload_d0_r0:1;
409 		uint64_t init_status:4;
410 		uint64_t mirrmask:4;
411 		uint64_t rankmask:4;
412 		uint64_t rank_ena:1;
413 		uint64_t sref_with_dll:1;
414 		uint64_t early_dqx:1;
415 		uint64_t reserved_18_39:22;
416 		uint64_t reset:1;
417 		uint64_t ecc_adr:1;
418 		uint64_t forcewrite:4;
419 		uint64_t idlepower:3;
420 		uint64_t pbank_lsb:4;
421 		uint64_t row_lsb:3;
422 		uint64_t ecc_ena:1;
423 		uint64_t init_start:1;
424 	} s;
425 	struct cvmx_lmcx_config_cn61xx {
426 		uint64_t reserved_61_63:3;
427 		uint64_t mode32b:1;
428 		uint64_t scrz:1;
429 		uint64_t early_unload_d1_r1:1;
430 		uint64_t early_unload_d1_r0:1;
431 		uint64_t early_unload_d0_r1:1;
432 		uint64_t early_unload_d0_r0:1;
433 		uint64_t init_status:4;
434 		uint64_t mirrmask:4;
435 		uint64_t rankmask:4;
436 		uint64_t rank_ena:1;
437 		uint64_t sref_with_dll:1;
438 		uint64_t early_dqx:1;
439 		uint64_t sequence:3;
440 		uint64_t ref_zqcs_int:19;
441 		uint64_t reset:1;
442 		uint64_t ecc_adr:1;
443 		uint64_t forcewrite:4;
444 		uint64_t idlepower:3;
445 		uint64_t pbank_lsb:4;
446 		uint64_t row_lsb:3;
447 		uint64_t ecc_ena:1;
448 		uint64_t init_start:1;
449 	} cn61xx;
450 	struct cvmx_lmcx_config_cn63xx {
451 		uint64_t reserved_59_63:5;
452 		uint64_t early_unload_d1_r1:1;
453 		uint64_t early_unload_d1_r0:1;
454 		uint64_t early_unload_d0_r1:1;
455 		uint64_t early_unload_d0_r0:1;
456 		uint64_t init_status:4;
457 		uint64_t mirrmask:4;
458 		uint64_t rankmask:4;
459 		uint64_t rank_ena:1;
460 		uint64_t sref_with_dll:1;
461 		uint64_t early_dqx:1;
462 		uint64_t sequence:3;
463 		uint64_t ref_zqcs_int:19;
464 		uint64_t reset:1;
465 		uint64_t ecc_adr:1;
466 		uint64_t forcewrite:4;
467 		uint64_t idlepower:3;
468 		uint64_t pbank_lsb:4;
469 		uint64_t row_lsb:3;
470 		uint64_t ecc_ena:1;
471 		uint64_t init_start:1;
472 	} cn63xx;
473 	struct cvmx_lmcx_config_cn63xxp1 {
474 		uint64_t reserved_55_63:9;
475 		uint64_t init_status:4;
476 		uint64_t mirrmask:4;
477 		uint64_t rankmask:4;
478 		uint64_t rank_ena:1;
479 		uint64_t sref_with_dll:1;
480 		uint64_t early_dqx:1;
481 		uint64_t sequence:3;
482 		uint64_t ref_zqcs_int:19;
483 		uint64_t reset:1;
484 		uint64_t ecc_adr:1;
485 		uint64_t forcewrite:4;
486 		uint64_t idlepower:3;
487 		uint64_t pbank_lsb:4;
488 		uint64_t row_lsb:3;
489 		uint64_t ecc_ena:1;
490 		uint64_t init_start:1;
491 	} cn63xxp1;
492 	struct cvmx_lmcx_config_cn66xx {
493 		uint64_t reserved_60_63:4;
494 		uint64_t scrz:1;
495 		uint64_t early_unload_d1_r1:1;
496 		uint64_t early_unload_d1_r0:1;
497 		uint64_t early_unload_d0_r1:1;
498 		uint64_t early_unload_d0_r0:1;
499 		uint64_t init_status:4;
500 		uint64_t mirrmask:4;
501 		uint64_t rankmask:4;
502 		uint64_t rank_ena:1;
503 		uint64_t sref_with_dll:1;
504 		uint64_t early_dqx:1;
505 		uint64_t sequence:3;
506 		uint64_t ref_zqcs_int:19;
507 		uint64_t reset:1;
508 		uint64_t ecc_adr:1;
509 		uint64_t forcewrite:4;
510 		uint64_t idlepower:3;
511 		uint64_t pbank_lsb:4;
512 		uint64_t row_lsb:3;
513 		uint64_t ecc_ena:1;
514 		uint64_t init_start:1;
515 	} cn66xx;
516 	struct cvmx_lmcx_config_cn63xx cn68xx;
517 	struct cvmx_lmcx_config_cn63xx cn68xxp1;
518 	struct cvmx_lmcx_config_cn70xx {
519 		uint64_t reserved_63_63:1;
520 		uint64_t bg2_enable:1;
521 		uint64_t mode_x4dev:1;
522 		uint64_t mode32b:1;
523 		uint64_t scrz:1;
524 		uint64_t early_unload_d1_r1:1;
525 		uint64_t early_unload_d1_r0:1;
526 		uint64_t early_unload_d0_r1:1;
527 		uint64_t early_unload_d0_r0:1;
528 		uint64_t init_status:4;
529 		uint64_t mirrmask:4;
530 		uint64_t rankmask:4;
531 		uint64_t rank_ena:1;
532 		uint64_t sref_with_dll:1;
533 		uint64_t early_dqx:1;
534 		uint64_t ref_zqcs_int:22;
535 		uint64_t reset:1;
536 		uint64_t ecc_adr:1;
537 		uint64_t forcewrite:4;
538 		uint64_t idlepower:3;
539 		uint64_t pbank_lsb:4;
540 		uint64_t row_lsb:3;
541 		uint64_t ecc_ena:1;
542 		uint64_t reserved_0_0:1;
543 	} cn70xx;
544 	struct cvmx_lmcx_config_cn70xx cn70xxp1;
545 	struct cvmx_lmcx_config_cn73xx {
546 		uint64_t lrdimm_ena:1;
547 		uint64_t bg2_enable:1;
548 		uint64_t mode_x4dev:1;
549 		uint64_t mode32b:1;
550 		uint64_t scrz:1;
551 		uint64_t early_unload_d1_r1:1;
552 		uint64_t early_unload_d1_r0:1;
553 		uint64_t early_unload_d0_r1:1;
554 		uint64_t early_unload_d0_r0:1;
555 		uint64_t init_status:4;
556 		uint64_t mirrmask:4;
557 		uint64_t rankmask:4;
558 		uint64_t rank_ena:1;
559 		uint64_t sref_with_dll:1;
560 		uint64_t early_dqx:1;
561 		uint64_t ref_zqcs_int:22;
562 		uint64_t reset:1;
563 		uint64_t ecc_adr:1;
564 		uint64_t forcewrite:4;
565 		uint64_t idlepower:3;
566 		uint64_t pbank_lsb:4;
567 		uint64_t row_lsb:3;
568 		uint64_t ecc_ena:1;
569 		uint64_t reserved_0_0:1;
570 	} cn73xx;
571 	struct cvmx_lmcx_config_cn73xx cn78xx;
572 	struct cvmx_lmcx_config_cn73xx cn78xxp1;
573 	struct cvmx_lmcx_config_cn61xx cnf71xx;
574 	struct cvmx_lmcx_config_cn73xx cnf75xx;
575 };
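
/*
 * Illustrative sketch, not part of the original definitions: the unions
 * in this file are normally used by loading the raw 64-bit CSR value
 * into .u64 and then reading the bit fields through the appropriate
 * chip view.  A minimal example using a field common to all views; the
 * raw value is assumed to have been fetched by whatever CSR accessor
 * the surrounding code provides.
 */
static inline int example_lmc_ecc_enabled(uint64_t raw_lmc_config)
{
	union cvmx_lmcx_config cfg;

	cfg.u64 = raw_lmc_config;	/* raw LMC()_CONFIG contents */
	return cfg.s.ecc_ena;		/* nonzero when ECC is enabled */
}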
576 
577 /**
578  * cvmx_lmc#_control
579  *
580  * LMC_CONTROL = LMC Control
581  * This register is an assortment of various control fields needed by the
582  * memory controller
583  */
584 union cvmx_lmcx_control {
585 	u64 u64;
586 	struct cvmx_lmcx_control_s {
587 		uint64_t scramble_ena:1;
588 		uint64_t thrcnt:12;
589 		uint64_t persub:8;
590 		uint64_t thrmax:4;
591 		uint64_t crm_cnt:5;
592 		uint64_t crm_thr:5;
593 		uint64_t crm_max:5;
594 		uint64_t rodt_bprch:1;
595 		uint64_t wodt_bprch:1;
596 		uint64_t bprch:2;
597 		uint64_t ext_zqcs_dis:1;
598 		uint64_t int_zqcs_dis:1;
599 		uint64_t auto_dclkdis:1;
600 		uint64_t xor_bank:1;
601 		uint64_t max_write_batch:4;
602 		uint64_t nxm_write_en:1;
603 		uint64_t elev_prio_dis:1;
604 		uint64_t inorder_wr:1;
605 		uint64_t inorder_rd:1;
606 		uint64_t throttle_wr:1;
607 		uint64_t throttle_rd:1;
608 		uint64_t fprch2:2;
609 		uint64_t pocas:1;
610 		uint64_t ddr2t:1;
611 		uint64_t bwcnt:1;
612 		uint64_t rdimm_ena:1;
613 	} s;
614 	struct cvmx_lmcx_control_s cn61xx;
615 	struct cvmx_lmcx_control_cn63xx {
616 		uint64_t reserved_24_63:40;
617 		uint64_t rodt_bprch:1;
618 		uint64_t wodt_bprch:1;
619 		uint64_t bprch:2;
620 		uint64_t ext_zqcs_dis:1;
621 		uint64_t int_zqcs_dis:1;
622 		uint64_t auto_dclkdis:1;
623 		uint64_t xor_bank:1;
624 		uint64_t max_write_batch:4;
625 		uint64_t nxm_write_en:1;
626 		uint64_t elev_prio_dis:1;
627 		uint64_t inorder_wr:1;
628 		uint64_t inorder_rd:1;
629 		uint64_t throttle_wr:1;
630 		uint64_t throttle_rd:1;
631 		uint64_t fprch2:2;
632 		uint64_t pocas:1;
633 		uint64_t ddr2t:1;
634 		uint64_t bwcnt:1;
635 		uint64_t rdimm_ena:1;
636 	} cn63xx;
637 	struct cvmx_lmcx_control_cn63xx cn63xxp1;
638 	struct cvmx_lmcx_control_cn66xx {
639 		uint64_t scramble_ena:1;
640 		uint64_t reserved_24_62:39;
641 		uint64_t rodt_bprch:1;
642 		uint64_t wodt_bprch:1;
643 		uint64_t bprch:2;
644 		uint64_t ext_zqcs_dis:1;
645 		uint64_t int_zqcs_dis:1;
646 		uint64_t auto_dclkdis:1;
647 		uint64_t xor_bank:1;
648 		uint64_t max_write_batch:4;
649 		uint64_t nxm_write_en:1;
650 		uint64_t elev_prio_dis:1;
651 		uint64_t inorder_wr:1;
652 		uint64_t inorder_rd:1;
653 		uint64_t throttle_wr:1;
654 		uint64_t throttle_rd:1;
655 		uint64_t fprch2:2;
656 		uint64_t pocas:1;
657 		uint64_t ddr2t:1;
658 		uint64_t bwcnt:1;
659 		uint64_t rdimm_ena:1;
660 	} cn66xx;
661 	struct cvmx_lmcx_control_cn68xx {
662 		uint64_t reserved_63_63:1;
663 		uint64_t thrcnt:12;
664 		uint64_t persub:8;
665 		uint64_t thrmax:4;
666 		uint64_t crm_cnt:5;
667 		uint64_t crm_thr:5;
668 		uint64_t crm_max:5;
669 		uint64_t rodt_bprch:1;
670 		uint64_t wodt_bprch:1;
671 		uint64_t bprch:2;
672 		uint64_t ext_zqcs_dis:1;
673 		uint64_t int_zqcs_dis:1;
674 		uint64_t auto_dclkdis:1;
675 		uint64_t xor_bank:1;
676 		uint64_t max_write_batch:4;
677 		uint64_t nxm_write_en:1;
678 		uint64_t elev_prio_dis:1;
679 		uint64_t inorder_wr:1;
680 		uint64_t inorder_rd:1;
681 		uint64_t throttle_wr:1;
682 		uint64_t throttle_rd:1;
683 		uint64_t fprch2:2;
684 		uint64_t pocas:1;
685 		uint64_t ddr2t:1;
686 		uint64_t bwcnt:1;
687 		uint64_t rdimm_ena:1;
688 	} cn68xx;
689 	struct cvmx_lmcx_control_cn68xx cn68xxp1;
690 	struct cvmx_lmcx_control_s cn70xx;
691 	struct cvmx_lmcx_control_s cn70xxp1;
692 	struct cvmx_lmcx_control_s cn73xx;
693 	struct cvmx_lmcx_control_s cn78xx;
694 	struct cvmx_lmcx_control_s cn78xxp1;
695 	struct cvmx_lmcx_control_cn66xx cnf71xx;
696 	struct cvmx_lmcx_control_s cnf75xx;
697 };
698 
699 /**
700  * cvmx_lmc#_ctl
701  *
702  * LMC_CTL = LMC Control
703  * This register is an assortment of various control fields needed by the
704  * memory controller
705  */
706 union cvmx_lmcx_ctl {
707 	u64 u64;
708 	struct cvmx_lmcx_ctl_s {
709 		uint64_t reserved_32_63:32;
710 		uint64_t ddr__nctl:4;
711 		uint64_t ddr__pctl:4;
712 		uint64_t slow_scf:1;
713 		uint64_t xor_bank:1;
714 		uint64_t max_write_batch:4;
715 		uint64_t pll_div2:1;
716 		uint64_t pll_bypass:1;
717 		uint64_t rdimm_ena:1;
718 		uint64_t r2r_slot:1;
719 		uint64_t inorder_mwf:1;
720 		uint64_t inorder_mrf:1;
721 		uint64_t reserved_10_11:2;
722 		uint64_t fprch2:1;
723 		uint64_t bprch:1;
724 		uint64_t sil_lat:2;
725 		uint64_t tskw:2;
726 		uint64_t qs_dic:2;
727 		uint64_t dic:2;
728 	} s;
729 	struct cvmx_lmcx_ctl_cn30xx {
730 		uint64_t reserved_32_63:32;
731 		uint64_t ddr__nctl:4;
732 		uint64_t ddr__pctl:4;
733 		uint64_t slow_scf:1;
734 		uint64_t xor_bank:1;
735 		uint64_t max_write_batch:4;
736 		uint64_t pll_div2:1;
737 		uint64_t pll_bypass:1;
738 		uint64_t rdimm_ena:1;
739 		uint64_t r2r_slot:1;
740 		uint64_t inorder_mwf:1;
741 		uint64_t inorder_mrf:1;
742 		uint64_t dreset:1;
743 		uint64_t mode32b:1;
744 		uint64_t fprch2:1;
745 		uint64_t bprch:1;
746 		uint64_t sil_lat:2;
747 		uint64_t tskw:2;
748 		uint64_t qs_dic:2;
749 		uint64_t dic:2;
750 	} cn30xx;
751 	struct cvmx_lmcx_ctl_cn30xx cn31xx;
752 	struct cvmx_lmcx_ctl_cn38xx {
753 		uint64_t reserved_32_63:32;
754 		uint64_t ddr__nctl:4;
755 		uint64_t ddr__pctl:4;
756 		uint64_t slow_scf:1;
757 		uint64_t xor_bank:1;
758 		uint64_t max_write_batch:4;
759 		uint64_t reserved_16_17:2;
760 		uint64_t rdimm_ena:1;
761 		uint64_t r2r_slot:1;
762 		uint64_t inorder_mwf:1;
763 		uint64_t inorder_mrf:1;
764 		uint64_t set_zero:1;
765 		uint64_t mode128b:1;
766 		uint64_t fprch2:1;
767 		uint64_t bprch:1;
768 		uint64_t sil_lat:2;
769 		uint64_t tskw:2;
770 		uint64_t qs_dic:2;
771 		uint64_t dic:2;
772 	} cn38xx;
773 	struct cvmx_lmcx_ctl_cn38xx cn38xxp2;
774 	struct cvmx_lmcx_ctl_cn50xx {
775 		uint64_t reserved_32_63:32;
776 		uint64_t ddr__nctl:4;
777 		uint64_t ddr__pctl:4;
778 		uint64_t slow_scf:1;
779 		uint64_t xor_bank:1;
780 		uint64_t max_write_batch:4;
781 		uint64_t reserved_17_17:1;
782 		uint64_t pll_bypass:1;
783 		uint64_t rdimm_ena:1;
784 		uint64_t r2r_slot:1;
785 		uint64_t inorder_mwf:1;
786 		uint64_t inorder_mrf:1;
787 		uint64_t dreset:1;
788 		uint64_t mode32b:1;
789 		uint64_t fprch2:1;
790 		uint64_t bprch:1;
791 		uint64_t sil_lat:2;
792 		uint64_t tskw:2;
793 		uint64_t qs_dic:2;
794 		uint64_t dic:2;
795 	} cn50xx;
796 	struct cvmx_lmcx_ctl_cn52xx {
797 		uint64_t reserved_32_63:32;
798 		uint64_t ddr__nctl:4;
799 		uint64_t ddr__pctl:4;
800 		uint64_t slow_scf:1;
801 		uint64_t xor_bank:1;
802 		uint64_t max_write_batch:4;
803 		uint64_t reserved_16_17:2;
804 		uint64_t rdimm_ena:1;
805 		uint64_t r2r_slot:1;
806 		uint64_t inorder_mwf:1;
807 		uint64_t inorder_mrf:1;
808 		uint64_t dreset:1;
809 		uint64_t mode32b:1;
810 		uint64_t fprch2:1;
811 		uint64_t bprch:1;
812 		uint64_t sil_lat:2;
813 		uint64_t tskw:2;
814 		uint64_t qs_dic:2;
815 		uint64_t dic:2;
816 	} cn52xx;
817 	struct cvmx_lmcx_ctl_cn52xx cn52xxp1;
818 	struct cvmx_lmcx_ctl_cn52xx cn56xx;
819 	struct cvmx_lmcx_ctl_cn52xx cn56xxp1;
820 	struct cvmx_lmcx_ctl_cn58xx {
821 		uint64_t reserved_32_63:32;
822 		uint64_t ddr__nctl:4;
823 		uint64_t ddr__pctl:4;
824 		uint64_t slow_scf:1;
825 		uint64_t xor_bank:1;
826 		uint64_t max_write_batch:4;
827 		uint64_t reserved_16_17:2;
828 		uint64_t rdimm_ena:1;
829 		uint64_t r2r_slot:1;
830 		uint64_t inorder_mwf:1;
831 		uint64_t inorder_mrf:1;
832 		uint64_t dreset:1;
833 		uint64_t mode128b:1;
834 		uint64_t fprch2:1;
835 		uint64_t bprch:1;
836 		uint64_t sil_lat:2;
837 		uint64_t tskw:2;
838 		uint64_t qs_dic:2;
839 		uint64_t dic:2;
840 	} cn58xx;
841 	struct cvmx_lmcx_ctl_cn58xx cn58xxp1;
842 };
843 
844 /**
845  * cvmx_lmc#_ctl1
846  *
847  * LMC_CTL1 = LMC Control1
848  * This register is an assortment of various control fields needed by the
849  * memory controller
850  */
851 union cvmx_lmcx_ctl1 {
852 	u64 u64;
853 	struct cvmx_lmcx_ctl1_s {
854 		uint64_t reserved_21_63:43;
855 		uint64_t ecc_adr:1;
856 		uint64_t forcewrite:4;
857 		uint64_t idlepower:3;
858 		uint64_t sequence:3;
859 		uint64_t sil_mode:1;
860 		uint64_t dcc_enable:1;
861 		uint64_t reserved_2_7:6;
862 		uint64_t data_layout:2;
863 	} s;
864 	struct cvmx_lmcx_ctl1_cn30xx {
865 		uint64_t reserved_2_63:62;
866 		uint64_t data_layout:2;
867 	} cn30xx;
868 	struct cvmx_lmcx_ctl1_cn50xx {
869 		uint64_t reserved_10_63:54;
870 		uint64_t sil_mode:1;
871 		uint64_t dcc_enable:1;
872 		uint64_t reserved_2_7:6;
873 		uint64_t data_layout:2;
874 	} cn50xx;
875 	struct cvmx_lmcx_ctl1_cn52xx {
876 		uint64_t reserved_21_63:43;
877 		uint64_t ecc_adr:1;
878 		uint64_t forcewrite:4;
879 		uint64_t idlepower:3;
880 		uint64_t sequence:3;
881 		uint64_t sil_mode:1;
882 		uint64_t dcc_enable:1;
883 		uint64_t reserved_0_7:8;
884 	} cn52xx;
885 	struct cvmx_lmcx_ctl1_cn52xx cn52xxp1;
886 	struct cvmx_lmcx_ctl1_cn52xx cn56xx;
887 	struct cvmx_lmcx_ctl1_cn52xx cn56xxp1;
888 	struct cvmx_lmcx_ctl1_cn58xx {
889 		uint64_t reserved_10_63:54;
890 		uint64_t sil_mode:1;
891 		uint64_t dcc_enable:1;
892 		uint64_t reserved_0_7:8;
893 	} cn58xx;
894 	struct cvmx_lmcx_ctl1_cn58xx cn58xxp1;
895 };
896 
897 /**
898  * cvmx_lmc#_dbtrain_ctl
899  *
900  * Reserved.
901  *
902  */
903 union cvmx_lmcx_dbtrain_ctl {
904 	u64 u64;
905 	struct cvmx_lmcx_dbtrain_ctl_s {
906 		uint64_t reserved_63_63:1;
907 		uint64_t lfsr_pattern_sel:1;
908 		uint64_t cmd_count_ext:2;
909 		uint64_t db_output_impedance:3;
910 		uint64_t db_sel:1;
911 		uint64_t tccd_sel:1;
912 		uint64_t rw_train:1;
913 		uint64_t read_dq_count:7;
914 		uint64_t read_cmd_count:5;
915 		uint64_t write_ena:1;
916 		uint64_t activate:1;
917 		uint64_t prank:2;
918 		uint64_t lrank:3;
919 		uint64_t row_a:18;
920 		uint64_t bg:2;
921 		uint64_t ba:2;
922 		uint64_t column_a:13;
923 	} s;
924 	struct cvmx_lmcx_dbtrain_ctl_cn73xx {
925 		uint64_t reserved_60_63:4;
926 		uint64_t db_output_impedance:3;
927 		uint64_t db_sel:1;
928 		uint64_t tccd_sel:1;
929 		uint64_t rw_train:1;
930 		uint64_t read_dq_count:7;
931 		uint64_t read_cmd_count:5;
932 		uint64_t write_ena:1;
933 		uint64_t activate:1;
934 		uint64_t prank:2;
935 		uint64_t lrank:3;
936 		uint64_t row_a:18;
937 		uint64_t bg:2;
938 		uint64_t ba:2;
939 		uint64_t column_a:13;
940 	} cn73xx;
941 	struct cvmx_lmcx_dbtrain_ctl_s cn78xx;
942 	struct cvmx_lmcx_dbtrain_ctl_cnf75xx {
943 		uint64_t reserved_62_63:2;
944 		uint64_t cmd_count_ext:2;
945 		uint64_t db_output_impedance:3;
946 		uint64_t db_sel:1;
947 		uint64_t tccd_sel:1;
948 		uint64_t rw_train:1;
949 		uint64_t read_dq_count:7;
950 		uint64_t read_cmd_count:5;
951 		uint64_t write_ena:1;
952 		uint64_t activate:1;
953 		uint64_t prank:2;
954 		uint64_t lrank:3;
955 		uint64_t row_a:18;
956 		uint64_t bg:2;
957 		uint64_t ba:2;
958 		uint64_t column_a:13;
959 	} cnf75xx;
960 };
961 
962 /**
963  * cvmx_lmc#_dclk_cnt
964  *
965  * LMC_DCLK_CNT  = Performance Counters
966  *
967  */
968 union cvmx_lmcx_dclk_cnt {
969 	u64 u64;
970 	struct cvmx_lmcx_dclk_cnt_s {
971 		uint64_t dclkcnt:64;
972 	} s;
973 	struct cvmx_lmcx_dclk_cnt_s cn61xx;
974 	struct cvmx_lmcx_dclk_cnt_s cn63xx;
975 	struct cvmx_lmcx_dclk_cnt_s cn63xxp1;
976 	struct cvmx_lmcx_dclk_cnt_s cn66xx;
977 	struct cvmx_lmcx_dclk_cnt_s cn68xx;
978 	struct cvmx_lmcx_dclk_cnt_s cn68xxp1;
979 	struct cvmx_lmcx_dclk_cnt_s cn70xx;
980 	struct cvmx_lmcx_dclk_cnt_s cn70xxp1;
981 	struct cvmx_lmcx_dclk_cnt_s cn73xx;
982 	struct cvmx_lmcx_dclk_cnt_s cn78xx;
983 	struct cvmx_lmcx_dclk_cnt_s cn78xxp1;
984 	struct cvmx_lmcx_dclk_cnt_s cnf71xx;
985 	struct cvmx_lmcx_dclk_cnt_s cnf75xx;
986 };
987 
988 /**
989  * cvmx_lmc#_dclk_cnt_hi
990  *
991  * LMC_DCLK_CNT_HI  = Performance Counters
992  *
993  */
994 union cvmx_lmcx_dclk_cnt_hi {
995 	u64 u64;
996 	struct cvmx_lmcx_dclk_cnt_hi_s {
997 		uint64_t reserved_32_63:32;
998 		uint64_t dclkcnt_hi:32;
999 	} s;
1000 	struct cvmx_lmcx_dclk_cnt_hi_s cn30xx;
1001 	struct cvmx_lmcx_dclk_cnt_hi_s cn31xx;
1002 	struct cvmx_lmcx_dclk_cnt_hi_s cn38xx;
1003 	struct cvmx_lmcx_dclk_cnt_hi_s cn38xxp2;
1004 	struct cvmx_lmcx_dclk_cnt_hi_s cn50xx;
1005 	struct cvmx_lmcx_dclk_cnt_hi_s cn52xx;
1006 	struct cvmx_lmcx_dclk_cnt_hi_s cn52xxp1;
1007 	struct cvmx_lmcx_dclk_cnt_hi_s cn56xx;
1008 	struct cvmx_lmcx_dclk_cnt_hi_s cn56xxp1;
1009 	struct cvmx_lmcx_dclk_cnt_hi_s cn58xx;
1010 	struct cvmx_lmcx_dclk_cnt_hi_s cn58xxp1;
1011 };
1012 
1013 /**
1014  * cvmx_lmc#_dclk_cnt_lo
1015  *
1016  * LMC_DCLK_CNT_LO  = Performance Counters
1017  *
1018  */
1019 union cvmx_lmcx_dclk_cnt_lo {
1020 	u64 u64;
1021 	struct cvmx_lmcx_dclk_cnt_lo_s {
1022 		uint64_t reserved_32_63:32;
1023 		uint64_t dclkcnt_lo:32;
1024 	} s;
1025 	struct cvmx_lmcx_dclk_cnt_lo_s cn30xx;
1026 	struct cvmx_lmcx_dclk_cnt_lo_s cn31xx;
1027 	struct cvmx_lmcx_dclk_cnt_lo_s cn38xx;
1028 	struct cvmx_lmcx_dclk_cnt_lo_s cn38xxp2;
1029 	struct cvmx_lmcx_dclk_cnt_lo_s cn50xx;
1030 	struct cvmx_lmcx_dclk_cnt_lo_s cn52xx;
1031 	struct cvmx_lmcx_dclk_cnt_lo_s cn52xxp1;
1032 	struct cvmx_lmcx_dclk_cnt_lo_s cn56xx;
1033 	struct cvmx_lmcx_dclk_cnt_lo_s cn56xxp1;
1034 	struct cvmx_lmcx_dclk_cnt_lo_s cn58xx;
1035 	struct cvmx_lmcx_dclk_cnt_lo_s cn58xxp1;
1036 };
1037 
1038 /**
1039  * cvmx_lmc#_dclk_ctl
1040  *
1041  * LMC_DCLK_CTL = LMC DCLK generation control
1042  *
1043  *
1044  * Notes:
1045  * This CSR is only relevant for LMC1. LMC0_DCLK_CTL is not used.
1046  *
1047  */
1048 union cvmx_lmcx_dclk_ctl {
1049 	u64 u64;
1050 	struct cvmx_lmcx_dclk_ctl_s {
1051 		uint64_t reserved_8_63:56;
1052 		uint64_t off90_ena:1;
1053 		uint64_t dclk90_byp:1;
1054 		uint64_t dclk90_ld:1;
1055 		uint64_t dclk90_vlu:5;
1056 	} s;
1057 	struct cvmx_lmcx_dclk_ctl_s cn56xx;
1058 	struct cvmx_lmcx_dclk_ctl_s cn56xxp1;
1059 };
1060 
1061 /**
1062  * cvmx_lmc#_ddr2_ctl
1063  *
1064  * LMC_DDR2_CTL = LMC DDR2 & DLL Control Register
1065  *
1066  */
1067 union cvmx_lmcx_ddr2_ctl {
1068 	u64 u64;
1069 	struct cvmx_lmcx_ddr2_ctl_s {
1070 		uint64_t reserved_32_63:32;
1071 		uint64_t bank8:1;
1072 		uint64_t burst8:1;
1073 		uint64_t addlat:3;
1074 		uint64_t pocas:1;
1075 		uint64_t bwcnt:1;
1076 		uint64_t twr:3;
1077 		uint64_t silo_hc:1;
1078 		uint64_t ddr_eof:4;
1079 		uint64_t tfaw:5;
1080 		uint64_t crip_mode:1;
1081 		uint64_t ddr2t:1;
1082 		uint64_t odt_ena:1;
1083 		uint64_t qdll_ena:1;
1084 		uint64_t dll90_vlu:5;
1085 		uint64_t dll90_byp:1;
1086 		uint64_t rdqs:1;
1087 		uint64_t ddr2:1;
1088 	} s;
1089 	struct cvmx_lmcx_ddr2_ctl_cn30xx {
1090 		uint64_t reserved_32_63:32;
1091 		uint64_t bank8:1;
1092 		uint64_t burst8:1;
1093 		uint64_t addlat:3;
1094 		uint64_t pocas:1;
1095 		uint64_t bwcnt:1;
1096 		uint64_t twr:3;
1097 		uint64_t silo_hc:1;
1098 		uint64_t ddr_eof:4;
1099 		uint64_t tfaw:5;
1100 		uint64_t crip_mode:1;
1101 		uint64_t ddr2t:1;
1102 		uint64_t odt_ena:1;
1103 		uint64_t qdll_ena:1;
1104 		uint64_t dll90_vlu:5;
1105 		uint64_t dll90_byp:1;
1106 		uint64_t reserved_1_1:1;
1107 		uint64_t ddr2:1;
1108 	} cn30xx;
1109 	struct cvmx_lmcx_ddr2_ctl_cn30xx cn31xx;
1110 	struct cvmx_lmcx_ddr2_ctl_s cn38xx;
1111 	struct cvmx_lmcx_ddr2_ctl_s cn38xxp2;
1112 	struct cvmx_lmcx_ddr2_ctl_s cn50xx;
1113 	struct cvmx_lmcx_ddr2_ctl_s cn52xx;
1114 	struct cvmx_lmcx_ddr2_ctl_s cn52xxp1;
1115 	struct cvmx_lmcx_ddr2_ctl_s cn56xx;
1116 	struct cvmx_lmcx_ddr2_ctl_s cn56xxp1;
1117 	struct cvmx_lmcx_ddr2_ctl_s cn58xx;
1118 	struct cvmx_lmcx_ddr2_ctl_s cn58xxp1;
1119 };
1120 
1121 /**
1122  * cvmx_lmc#_ddr4_dimm_ctl
1123  *
1124  * Bits 0-21 of this register are used only when LMC()_CONTROL[RDIMM_ENA] = 1.
1125  *
1126  * During an RCW initialization sequence, bits 0-21 control LMC's write
1127  * operations to the extended DDR4 control words in the JEDEC standard
1128  * registering clock driver on an RDIMM.
1129  */
1130 union cvmx_lmcx_ddr4_dimm_ctl {
1131 	u64 u64;
1132 	struct cvmx_lmcx_ddr4_dimm_ctl_s {
1133 		uint64_t reserved_28_63:36;
1134 		uint64_t rank_timing_enable:1;
1135 		uint64_t bodt_trans_mode:1;
1136 		uint64_t trans_mode_ena:1;
1137 		uint64_t read_preamble_mode:1;
1138 		uint64_t buff_config_da3:1;
1139 		uint64_t mpr_over_ena:1;
1140 		uint64_t ddr4_dimm1_wmask:11;
1141 		uint64_t ddr4_dimm0_wmask:11;
1142 	} s;
1143 	struct cvmx_lmcx_ddr4_dimm_ctl_cn70xx {
1144 		uint64_t reserved_22_63:42;
1145 		uint64_t ddr4_dimm1_wmask:11;
1146 		uint64_t ddr4_dimm0_wmask:11;
1147 	} cn70xx;
1148 	struct cvmx_lmcx_ddr4_dimm_ctl_cn70xx cn70xxp1;
1149 	struct cvmx_lmcx_ddr4_dimm_ctl_s cn73xx;
1150 	struct cvmx_lmcx_ddr4_dimm_ctl_s cn78xx;
1151 	struct cvmx_lmcx_ddr4_dimm_ctl_s cn78xxp1;
1152 	struct cvmx_lmcx_ddr4_dimm_ctl_s cnf75xx;
1153 };
1154 
1155 /**
1156  * cvmx_lmc#_ddr_pll_ctl
1157  *
1158  * This register controls the DDR_CK frequency. For details, refer to CK
1159  * speed programming. See LMC initialization sequence for the initialization
1160  * sequence.
1161  * DDR PLL bringup sequence:
1162  *
1163  * 1. Write [CLKF], [CLKR], [DDR_PS_EN].
1164  *
1165  * 2. Wait 128 ref clock cycles (7680 core-clock cycles).
1166  *
1167  * 3. Write 1 to [RESET_N].
1168  *
1169  * 4. Wait 1152 ref clocks (1152*16 core-clock cycles).
1170  *
1171  * 5. Write 0 to [DDR_DIV_RESET].
1172  *
1173  * 6. Wait 10 ref clock cycles (160 core-clock cycles) before bringing up
1174  * the DDR interface.
1175  */
1176 union cvmx_lmcx_ddr_pll_ctl {
1177 	u64 u64;
1178 	struct cvmx_lmcx_ddr_pll_ctl_s {
1179 		uint64_t reserved_45_63:19;
1180 		uint64_t dclk_alt_refclk_sel:1;
1181 		uint64_t bwadj:12;
1182 		uint64_t dclk_invert:1;
1183 		uint64_t phy_dcok:1;
1184 		uint64_t ddr4_mode:1;
1185 		uint64_t pll_fbslip:1;
1186 		uint64_t pll_lock:1;
1187 		uint64_t reserved_18_26:9;
1188 		uint64_t diffamp:4;
1189 		uint64_t cps:3;
1190 		uint64_t reserved_8_10:3;
1191 		uint64_t reset_n:1;
1192 		uint64_t clkf:7;
1193 	} s;
1194 	struct cvmx_lmcx_ddr_pll_ctl_cn61xx {
1195 		uint64_t reserved_27_63:37;
1196 		uint64_t jtg_test_mode:1;
1197 		uint64_t dfm_div_reset:1;
1198 		uint64_t dfm_ps_en:3;
1199 		uint64_t ddr_div_reset:1;
1200 		uint64_t ddr_ps_en:3;
1201 		uint64_t diffamp:4;
1202 		uint64_t cps:3;
1203 		uint64_t cpb:3;
1204 		uint64_t reset_n:1;
1205 		uint64_t clkf:7;
1206 	} cn61xx;
1207 	struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn63xx;
1208 	struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn63xxp1;
1209 	struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn66xx;
1210 	struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn68xx;
1211 	struct cvmx_lmcx_ddr_pll_ctl_cn61xx cn68xxp1;
1212 	struct cvmx_lmcx_ddr_pll_ctl_cn70xx {
1213 		uint64_t reserved_31_63:33;
1214 		uint64_t phy_dcok:1;
1215 		uint64_t ddr4_mode:1;
1216 		uint64_t pll_fbslip:1;
1217 		uint64_t pll_lock:1;
1218 		uint64_t pll_rfslip:1;
1219 		uint64_t clkr:2;
1220 		uint64_t jtg_test_mode:1;
1221 		uint64_t ddr_div_reset:1;
1222 		uint64_t ddr_ps_en:4;
1223 		uint64_t reserved_8_17:10;
1224 		uint64_t reset_n:1;
1225 		uint64_t clkf:7;
1226 	} cn70xx;
1227 	struct cvmx_lmcx_ddr_pll_ctl_cn70xx cn70xxp1;
1228 	struct cvmx_lmcx_ddr_pll_ctl_cn73xx {
1229 		uint64_t reserved_45_63:19;
1230 		uint64_t dclk_alt_refclk_sel:1;
1231 		uint64_t bwadj:12;
1232 		uint64_t dclk_invert:1;
1233 		uint64_t phy_dcok:1;
1234 		uint64_t ddr4_mode:1;
1235 		uint64_t pll_fbslip:1;
1236 		uint64_t pll_lock:1;
1237 		uint64_t pll_rfslip:1;
1238 		uint64_t clkr:2;
1239 		uint64_t jtg_test_mode:1;
1240 		uint64_t ddr_div_reset:1;
1241 		uint64_t ddr_ps_en:4;
1242 		uint64_t reserved_9_17:9;
1243 		uint64_t clkf_ext:1;
1244 		uint64_t reset_n:1;
1245 		uint64_t clkf:7;
1246 	} cn73xx;
1247 	struct cvmx_lmcx_ddr_pll_ctl_cn73xx cn78xx;
1248 	struct cvmx_lmcx_ddr_pll_ctl_cn73xx cn78xxp1;
1249 	struct cvmx_lmcx_ddr_pll_ctl_cn61xx cnf71xx;
1250 	struct cvmx_lmcx_ddr_pll_ctl_cn73xx cnf75xx;
1251 };
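
/*
 * Illustrative sketch of the DDR PLL bring-up sequence documented above,
 * not part of the original definitions.  It assumes the PLL starts in
 * its reset state ([RESET_N] = 0, [DDR_DIV_RESET] = 1) and uses the
 * CN73XX/CN78XX register layout.  The lmc_wr()/lmc_rd() accessors and
 * the wait_core_clocks() delay helper are hypothetical stand-ins for
 * whatever the surrounding driver provides, and the CLKF/CLKR/DDR_PS_EN
 * values are placeholders rather than recommended settings.
 */
static inline void example_ddr_pll_bringup(void (*lmc_wr)(uint64_t addr, uint64_t val),
					   uint64_t (*lmc_rd)(uint64_t addr),
					   void (*wait_core_clocks)(unsigned int clocks),
					   unsigned long if_num)
{
	union cvmx_lmcx_ddr_pll_ctl pll;

	/* Step 1: program CLKF, CLKR and DDR_PS_EN while the PLL is in reset. */
	pll.u64 = lmc_rd(CVMX_LMCX_DDR_PLL_CTL(if_num));
	pll.cn73xx.clkf = 0x30;		/* placeholder feedback divider */
	pll.cn73xx.clkr = 0;		/* placeholder reference divider */
	pll.cn73xx.ddr_ps_en = 2;	/* placeholder post-scalar setting */
	lmc_wr(CVMX_LMCX_DDR_PLL_CTL(if_num), pll.u64);

	/* Step 2: wait 128 reference clocks (7680 core clocks). */
	wait_core_clocks(7680);

	/* Step 3: release the PLL reset. */
	pll.cn73xx.reset_n = 1;
	lmc_wr(CVMX_LMCX_DDR_PLL_CTL(if_num), pll.u64);

	/* Step 4: wait 1152 reference clocks (1152 * 16 core clocks). */
	wait_core_clocks(1152 * 16);

	/* Step 5: release the post-scalar divider reset. */
	pll.cn73xx.ddr_div_reset = 0;
	lmc_wr(CVMX_LMCX_DDR_PLL_CTL(if_num), pll.u64);

	/* Step 6: wait 10 reference clocks (160 core clocks) before using DDR. */
	wait_core_clocks(160);
}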
1252 
1253 /**
1254  * cvmx_lmc#_delay_cfg
1255  *
1256  * LMC_DELAY_CFG = Open-loop delay line settings
1257  *
1258  *
1259  * Notes:
1260  * The DQ bits add OUTGOING delay only to dq, dqs_[p,n], cb, cbs_[p,n], dqm.
1261  * Delay is approximately 50-80ps per setting depending on process/voltage.
1262  * There is no need to add incoming delay since by default all strobe bits
1263  * are delayed internally by 90 degrees (as was always the case in previous
1264  * passes and past chips).
1265  *
1266  * The CMD bits add delay to all command bits DDR_RAS, DDR_CAS, DDR_A<15:0>,
1267  * DDR_BA<2:0>, DDR_n_CS<1:0>_L, DDR_WE, DDR_CKE and DDR_ODT_<7:0>.
1268  * Again, delay is 50-80ps per tap.
1269  *
1270  * The CLK bits add delay to all clock signals DDR_CK_<5:0>_P and
1271  * DDR_CK_<5:0>_N.  Again, delay is 50-80ps per tap.
1272  *
1273  * The usage scenario is the following: There is too much delay on command
1274  * signals and setup on command is not met. The user can then delay the
1275  * clock until setup is met.
1276  *
1277  * At the same time though, dq/dqs should be delayed because there is also
1278  * a DDR spec tying dqs with clock. If clock is too much delayed with
1279  * respect to dqs, writes will start to fail.
1280  *
1281  * This scheme should eliminate the board need of adding routing delay to
1282  * clock signals to make high frequencies work.
1283  */
1284 union cvmx_lmcx_delay_cfg {
1285 	u64 u64;
1286 	struct cvmx_lmcx_delay_cfg_s {
1287 		uint64_t reserved_15_63:49;
1288 		uint64_t dq:5;
1289 		uint64_t cmd:5;
1290 		uint64_t clk:5;
1291 	} s;
1292 	struct cvmx_lmcx_delay_cfg_s cn30xx;
1293 	struct cvmx_lmcx_delay_cfg_cn38xx {
1294 		uint64_t reserved_14_63:50;
1295 		uint64_t dq:4;
1296 		uint64_t reserved_9_9:1;
1297 		uint64_t cmd:4;
1298 		uint64_t reserved_4_4:1;
1299 		uint64_t clk:4;
1300 	} cn38xx;
1301 	struct cvmx_lmcx_delay_cfg_cn38xx cn50xx;
1302 	struct cvmx_lmcx_delay_cfg_cn38xx cn52xx;
1303 	struct cvmx_lmcx_delay_cfg_cn38xx cn52xxp1;
1304 	struct cvmx_lmcx_delay_cfg_cn38xx cn56xx;
1305 	struct cvmx_lmcx_delay_cfg_cn38xx cn56xxp1;
1306 	struct cvmx_lmcx_delay_cfg_cn38xx cn58xx;
1307 	struct cvmx_lmcx_delay_cfg_cn38xx cn58xxp1;
1308 };
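
/*
 * Illustrative sketch of the usage scenario in the notes above, not part
 * of the original definitions: when command setup is failing, the CLK
 * delay taps are increased, and DQ is moved with them so the DQS-to-CK
 * relationship required by the DDR specification is preserved.  Each tap
 * is roughly 50-80 ps; the helper name is hypothetical and the caller is
 * assumed to read/write LMC()_DELAY_CFG with its own CSR accessors.
 */
static inline uint64_t example_delay_cfg_bump_clk(uint64_t raw_delay_cfg,
						  unsigned int extra_taps)
{
	union cvmx_lmcx_delay_cfg dcfg;

	dcfg.u64 = raw_delay_cfg;
	dcfg.s.clk += extra_taps;	/* delay CK until command setup is met */
	dcfg.s.dq += extra_taps;	/* keep DQ/DQS tracking the delayed CK */
	return dcfg.u64;		/* written back to LMC()_DELAY_CFG by the caller */
}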
1309 
1310 /**
1311  * cvmx_lmc#_dimm#_ddr4_params0
1312  *
1313  * This register contains values to be programmed into the extra DDR4 control
1314  * words in the corresponding (registered) DIMM. These are control words
1315  * RC1x through RC8x.
1316  */
1317 union cvmx_lmcx_dimmx_ddr4_params0 {
1318 	u64 u64;
1319 	struct cvmx_lmcx_dimmx_ddr4_params0_s {
1320 		uint64_t rc8x:8;
1321 		uint64_t rc7x:8;
1322 		uint64_t rc6x:8;
1323 		uint64_t rc5x:8;
1324 		uint64_t rc4x:8;
1325 		uint64_t rc3x:8;
1326 		uint64_t rc2x:8;
1327 		uint64_t rc1x:8;
1328 	} s;
1329 	struct cvmx_lmcx_dimmx_ddr4_params0_s cn70xx;
1330 	struct cvmx_lmcx_dimmx_ddr4_params0_s cn70xxp1;
1331 	struct cvmx_lmcx_dimmx_ddr4_params0_s cn73xx;
1332 	struct cvmx_lmcx_dimmx_ddr4_params0_s cn78xx;
1333 	struct cvmx_lmcx_dimmx_ddr4_params0_s cn78xxp1;
1334 	struct cvmx_lmcx_dimmx_ddr4_params0_s cnf75xx;
1335 };
1336 
1337 /**
1338  * cvmx_lmc#_dimm#_ddr4_params1
1339  *
1340  * This register contains values to be programmed into the extra DDR4 control
1341  * words in the corresponding (registered) DIMM. These are control words
1342  * RC9x through RCBx.
1343  */
1344 union cvmx_lmcx_dimmx_ddr4_params1 {
1345 	u64 u64;
1346 	struct cvmx_lmcx_dimmx_ddr4_params1_s {
1347 		uint64_t reserved_24_63:40;
1348 		uint64_t rcbx:8;
1349 		uint64_t rcax:8;
1350 		uint64_t rc9x:8;
1351 	} s;
1352 	struct cvmx_lmcx_dimmx_ddr4_params1_s cn70xx;
1353 	struct cvmx_lmcx_dimmx_ddr4_params1_s cn70xxp1;
1354 	struct cvmx_lmcx_dimmx_ddr4_params1_s cn73xx;
1355 	struct cvmx_lmcx_dimmx_ddr4_params1_s cn78xx;
1356 	struct cvmx_lmcx_dimmx_ddr4_params1_s cn78xxp1;
1357 	struct cvmx_lmcx_dimmx_ddr4_params1_s cnf75xx;
1358 };
1359 
1360 /**
1361  * cvmx_lmc#_dimm#_params
1362  *
1363  * This register contains values to be programmed into each control word in
1364  * the corresponding (registered) DIMM. The control words allow optimization
1365  * of the device properties for different raw card designs. Note that LMC
1366  * only uses this CSR when LMC()_CONTROL[RDIMM_ENA]=1. During a power-up/init
1367  * sequence, LMC writes these fields into the control words in the JEDEC
1368  * standard DDR3 SSTE32882 registering clock driver or DDR4 Register
1369  * DDR4RCD01 on an RDIMM when corresponding LMC()_DIMM_CTL[DIMM*_WMASK]
1370  * bits are set.
1371  */
1372 union cvmx_lmcx_dimmx_params {
1373 	u64 u64;
1374 	struct cvmx_lmcx_dimmx_params_s {
1375 		uint64_t rc15:4;
1376 		uint64_t rc14:4;
1377 		uint64_t rc13:4;
1378 		uint64_t rc12:4;
1379 		uint64_t rc11:4;
1380 		uint64_t rc10:4;
1381 		uint64_t rc9:4;
1382 		uint64_t rc8:4;
1383 		uint64_t rc7:4;
1384 		uint64_t rc6:4;
1385 		uint64_t rc5:4;
1386 		uint64_t rc4:4;
1387 		uint64_t rc3:4;
1388 		uint64_t rc2:4;
1389 		uint64_t rc1:4;
1390 		uint64_t rc0:4;
1391 	} s;
1392 	struct cvmx_lmcx_dimmx_params_s cn61xx;
1393 	struct cvmx_lmcx_dimmx_params_s cn63xx;
1394 	struct cvmx_lmcx_dimmx_params_s cn63xxp1;
1395 	struct cvmx_lmcx_dimmx_params_s cn66xx;
1396 	struct cvmx_lmcx_dimmx_params_s cn68xx;
1397 	struct cvmx_lmcx_dimmx_params_s cn68xxp1;
1398 	struct cvmx_lmcx_dimmx_params_s cn70xx;
1399 	struct cvmx_lmcx_dimmx_params_s cn70xxp1;
1400 	struct cvmx_lmcx_dimmx_params_s cn73xx;
1401 	struct cvmx_lmcx_dimmx_params_s cn78xx;
1402 	struct cvmx_lmcx_dimmx_params_s cn78xxp1;
1403 	struct cvmx_lmcx_dimmx_params_s cnf71xx;
1404 	struct cvmx_lmcx_dimmx_params_s cnf75xx;
1405 };
1406 
1407 /**
1408  * cvmx_lmc#_dimm_ctl
1409  *
1410  * Note that this CSR is only used when LMC()_CONTROL[RDIMM_ENA] = 1. During
1411  * a power-up/init sequence, this CSR controls LMC's write operations to the
1412  * control words in the JEDEC standard DDR3 SSTE32882 registering clock
1413  * driver or DDR4 Register DDR4RCD01 on an RDIMM.
1414  */
1415 union cvmx_lmcx_dimm_ctl {
1416 	u64 u64;
1417 	struct cvmx_lmcx_dimm_ctl_s {
1418 		uint64_t reserved_46_63:18;
1419 		uint64_t parity:1;
1420 		uint64_t tcws:13;
1421 		uint64_t dimm1_wmask:16;
1422 		uint64_t dimm0_wmask:16;
1423 	} s;
1424 	struct cvmx_lmcx_dimm_ctl_s cn61xx;
1425 	struct cvmx_lmcx_dimm_ctl_s cn63xx;
1426 	struct cvmx_lmcx_dimm_ctl_s cn63xxp1;
1427 	struct cvmx_lmcx_dimm_ctl_s cn66xx;
1428 	struct cvmx_lmcx_dimm_ctl_s cn68xx;
1429 	struct cvmx_lmcx_dimm_ctl_s cn68xxp1;
1430 	struct cvmx_lmcx_dimm_ctl_s cn70xx;
1431 	struct cvmx_lmcx_dimm_ctl_s cn70xxp1;
1432 	struct cvmx_lmcx_dimm_ctl_s cn73xx;
1433 	struct cvmx_lmcx_dimm_ctl_s cn78xx;
1434 	struct cvmx_lmcx_dimm_ctl_s cn78xxp1;
1435 	struct cvmx_lmcx_dimm_ctl_s cnf71xx;
1436 	struct cvmx_lmcx_dimm_ctl_s cnf75xx;
1437 };
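
/*
 * Illustrative sketch, not part of the original definitions: with
 * LMC()_CONTROL[RDIMM_ENA] = 1, the RC0..RC15 nibbles staged in
 * LMC()_DIMM()_PARAMS are written to the DIMM's registering clock driver
 * only for the control words whose LMC()_DIMM_CTL[DIMM*_WMASK] bits are
 * set.  The helper below stages one control word for DIMM 0; its name is
 * hypothetical and the caller is assumed to write both CSRs back with
 * the surrounding code's accessors before starting the init sequence.
 */
static inline void example_stage_dimm0_rc_word(union cvmx_lmcx_dimmx_params *params,
					       union cvmx_lmcx_dimm_ctl *dimm_ctl,
					       unsigned int rc_num,	/* 0..15 */
					       unsigned int nibble)	/* 4-bit value */
{
	/* Each RCn field is a 4-bit nibble at bit position rc_num * 4. */
	params->u64 &= ~(0xfull << (rc_num * 4));
	params->u64 |= (uint64_t)(nibble & 0xf) << (rc_num * 4);

	/* Mark RCn so the power-up/init sequence actually sends it. */
	dimm_ctl->s.dimm0_wmask |= 1u << rc_num;
}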
1438 
1439 /**
1440  * cvmx_lmc#_dll_ctl
1441  *
1442  * LMC_DLL_CTL = LMC DLL control and DCLK reset
1443  *
1444  */
1445 union cvmx_lmcx_dll_ctl {
1446 	u64 u64;
1447 	struct cvmx_lmcx_dll_ctl_s {
1448 		uint64_t reserved_8_63:56;
1449 		uint64_t dreset:1;
1450 		uint64_t dll90_byp:1;
1451 		uint64_t dll90_ena:1;
1452 		uint64_t dll90_vlu:5;
1453 	} s;
1454 	struct cvmx_lmcx_dll_ctl_s cn52xx;
1455 	struct cvmx_lmcx_dll_ctl_s cn52xxp1;
1456 	struct cvmx_lmcx_dll_ctl_s cn56xx;
1457 	struct cvmx_lmcx_dll_ctl_s cn56xxp1;
1458 };
1459 
1460 /**
1461  * cvmx_lmc#_dll_ctl2
1462  *
1463  * See LMC initialization sequence for the initialization sequence.
1464  *
1465  */
1466 union cvmx_lmcx_dll_ctl2 {
1467 	u64 u64;
1468 	struct cvmx_lmcx_dll_ctl2_s {
1469 		uint64_t reserved_0_63:64;
1470 	} s;
1471 	struct cvmx_lmcx_dll_ctl2_cn61xx {
1472 		uint64_t reserved_16_63:48;
1473 		uint64_t intf_en:1;
1474 		uint64_t dll_bringup:1;
1475 		uint64_t dreset:1;
1476 		uint64_t quad_dll_ena:1;
1477 		uint64_t byp_sel:4;
1478 		uint64_t byp_setting:8;
1479 	} cn61xx;
1480 	struct cvmx_lmcx_dll_ctl2_cn63xx {
1481 		uint64_t reserved_15_63:49;
1482 		uint64_t dll_bringup:1;
1483 		uint64_t dreset:1;
1484 		uint64_t quad_dll_ena:1;
1485 		uint64_t byp_sel:4;
1486 		uint64_t byp_setting:8;
1487 	} cn63xx;
1488 	struct cvmx_lmcx_dll_ctl2_cn63xx cn63xxp1;
1489 	struct cvmx_lmcx_dll_ctl2_cn63xx cn66xx;
1490 	struct cvmx_lmcx_dll_ctl2_cn61xx cn68xx;
1491 	struct cvmx_lmcx_dll_ctl2_cn61xx cn68xxp1;
1492 	struct cvmx_lmcx_dll_ctl2_cn70xx {
1493 		uint64_t reserved_17_63:47;
1494 		uint64_t intf_en:1;
1495 		uint64_t dll_bringup:1;
1496 		uint64_t dreset:1;
1497 		uint64_t quad_dll_ena:1;
1498 		uint64_t byp_sel:4;
1499 		uint64_t byp_setting:9;
1500 	} cn70xx;
1501 	struct cvmx_lmcx_dll_ctl2_cn70xx cn70xxp1;
1502 	struct cvmx_lmcx_dll_ctl2_cn70xx cn73xx;
1503 	struct cvmx_lmcx_dll_ctl2_cn70xx cn78xx;
1504 	struct cvmx_lmcx_dll_ctl2_cn70xx cn78xxp1;
1505 	struct cvmx_lmcx_dll_ctl2_cn61xx cnf71xx;
1506 	struct cvmx_lmcx_dll_ctl2_cn70xx cnf75xx;
1507 };
1508 
1509 /**
1510  * cvmx_lmc#_dll_ctl3
1511  *
1512  * LMC_DLL_CTL3 = LMC DLL control and DCLK reset
1513  *
1514  */
1515 union cvmx_lmcx_dll_ctl3 {
1516 	u64 u64;
1517 	struct cvmx_lmcx_dll_ctl3_s {
1518 		uint64_t reserved_50_63:14;
1519 		uint64_t wr_deskew_ena:1;
1520 		uint64_t wr_deskew_ld:1;
1521 		uint64_t bit_select:4;
1522 		uint64_t reserved_0_43:44;
1523 	} s;
1524 	struct cvmx_lmcx_dll_ctl3_cn61xx {
1525 		uint64_t reserved_41_63:23;
1526 		uint64_t dclk90_fwd:1;
1527 		uint64_t ddr_90_dly_byp:1;
1528 		uint64_t dclk90_recal_dis:1;
1529 		uint64_t dclk90_byp_sel:1;
1530 		uint64_t dclk90_byp_setting:8;
1531 		uint64_t dll_fast:1;
1532 		uint64_t dll90_setting:8;
1533 		uint64_t fine_tune_mode:1;
1534 		uint64_t dll_mode:1;
1535 		uint64_t dll90_byte_sel:4;
1536 		uint64_t offset_ena:1;
1537 		uint64_t load_offset:1;
1538 		uint64_t mode_sel:2;
1539 		uint64_t byte_sel:4;
1540 		uint64_t offset:6;
1541 	} cn61xx;
1542 	struct cvmx_lmcx_dll_ctl3_cn63xx {
1543 		uint64_t reserved_29_63:35;
1544 		uint64_t dll_fast:1;
1545 		uint64_t dll90_setting:8;
1546 		uint64_t fine_tune_mode:1;
1547 		uint64_t dll_mode:1;
1548 		uint64_t dll90_byte_sel:4;
1549 		uint64_t offset_ena:1;
1550 		uint64_t load_offset:1;
1551 		uint64_t mode_sel:2;
1552 		uint64_t byte_sel:4;
1553 		uint64_t offset:6;
1554 	} cn63xx;
1555 	struct cvmx_lmcx_dll_ctl3_cn63xx cn63xxp1;
1556 	struct cvmx_lmcx_dll_ctl3_cn63xx cn66xx;
1557 	struct cvmx_lmcx_dll_ctl3_cn61xx cn68xx;
1558 	struct cvmx_lmcx_dll_ctl3_cn61xx cn68xxp1;
1559 	struct cvmx_lmcx_dll_ctl3_cn70xx {
1560 		uint64_t reserved_44_63:20;
1561 		uint64_t dclk90_fwd:1;
1562 		uint64_t ddr_90_dly_byp:1;
1563 		uint64_t dclk90_recal_dis:1;
1564 		uint64_t dclk90_byp_sel:1;
1565 		uint64_t dclk90_byp_setting:9;
1566 		uint64_t dll_fast:1;
1567 		uint64_t dll90_setting:9;
1568 		uint64_t fine_tune_mode:1;
1569 		uint64_t dll_mode:1;
1570 		uint64_t dll90_byte_sel:4;
1571 		uint64_t offset_ena:1;
1572 		uint64_t load_offset:1;
1573 		uint64_t mode_sel:2;
1574 		uint64_t byte_sel:4;
1575 		uint64_t offset:7;
1576 	} cn70xx;
1577 	struct cvmx_lmcx_dll_ctl3_cn70xx cn70xxp1;
1578 	struct cvmx_lmcx_dll_ctl3_cn73xx {
1579 		uint64_t reserved_50_63:14;
1580 		uint64_t wr_deskew_ena:1;
1581 		uint64_t wr_deskew_ld:1;
1582 		uint64_t bit_select:4;
1583 		uint64_t dclk90_fwd:1;
1584 		uint64_t ddr_90_dly_byp:1;
1585 		uint64_t dclk90_recal_dis:1;
1586 		uint64_t dclk90_byp_sel:1;
1587 		uint64_t dclk90_byp_setting:9;
1588 		uint64_t dll_fast:1;
1589 		uint64_t dll90_setting:9;
1590 		uint64_t fine_tune_mode:1;
1591 		uint64_t dll_mode:1;
1592 		uint64_t dll90_byte_sel:4;
1593 		uint64_t offset_ena:1;
1594 		uint64_t load_offset:1;
1595 		uint64_t mode_sel:2;
1596 		uint64_t byte_sel:4;
1597 		uint64_t offset:7;
1598 	} cn73xx;
1599 	struct cvmx_lmcx_dll_ctl3_cn73xx cn78xx;
1600 	struct cvmx_lmcx_dll_ctl3_cn73xx cn78xxp1;
1601 	struct cvmx_lmcx_dll_ctl3_cn61xx cnf71xx;
1602 	struct cvmx_lmcx_dll_ctl3_cn73xx cnf75xx;
1603 };
1604 
1605 /**
1606  * cvmx_lmc#_dual_memcfg
1607  *
1608  * This register controls certain parameters of dual-memory configuration.
1609  *
1610  * This register enables the design to have two separate memory
1611  * configurations, selected dynamically by the reference address. Note
1612  * however, that both configurations share LMC()_CONTROL[XOR_BANK],
1613  * LMC()_CONFIG [PBANK_LSB], LMC()_CONFIG[RANK_ENA], and all timing parameters.
1614  *
1615  * In this description:
1616  * * config0 refers to the normal memory configuration that is defined by the
1617  * LMC()_CONFIG[ROW_LSB] parameter
1618  * * config1 refers to the dual (or second) memory configuration that is
1619  * defined by this register.
1620  */
1621 union cvmx_lmcx_dual_memcfg {
1622 	u64 u64;
1623 	struct cvmx_lmcx_dual_memcfg_s {
1624 		uint64_t reserved_20_63:44;
1625 		uint64_t bank8:1;
1626 		uint64_t row_lsb:3;
1627 		uint64_t reserved_8_15:8;
1628 		uint64_t cs_mask:8;
1629 	} s;
1630 	struct cvmx_lmcx_dual_memcfg_s cn50xx;
1631 	struct cvmx_lmcx_dual_memcfg_s cn52xx;
1632 	struct cvmx_lmcx_dual_memcfg_s cn52xxp1;
1633 	struct cvmx_lmcx_dual_memcfg_s cn56xx;
1634 	struct cvmx_lmcx_dual_memcfg_s cn56xxp1;
1635 	struct cvmx_lmcx_dual_memcfg_s cn58xx;
1636 	struct cvmx_lmcx_dual_memcfg_s cn58xxp1;
1637 	struct cvmx_lmcx_dual_memcfg_cn61xx {
1638 		uint64_t reserved_19_63:45;
1639 		uint64_t row_lsb:3;
1640 		uint64_t reserved_8_15:8;
1641 		uint64_t cs_mask:8;
1642 	} cn61xx;
1643 	struct cvmx_lmcx_dual_memcfg_cn61xx cn63xx;
1644 	struct cvmx_lmcx_dual_memcfg_cn61xx cn63xxp1;
1645 	struct cvmx_lmcx_dual_memcfg_cn61xx cn66xx;
1646 	struct cvmx_lmcx_dual_memcfg_cn61xx cn68xx;
1647 	struct cvmx_lmcx_dual_memcfg_cn61xx cn68xxp1;
1648 	struct cvmx_lmcx_dual_memcfg_cn70xx {
1649 		uint64_t reserved_19_63:45;
1650 		uint64_t row_lsb:3;
1651 		uint64_t reserved_4_15:12;
1652 		uint64_t cs_mask:4;
1653 	} cn70xx;
1654 	struct cvmx_lmcx_dual_memcfg_cn70xx cn70xxp1;
1655 	struct cvmx_lmcx_dual_memcfg_cn70xx cn73xx;
1656 	struct cvmx_lmcx_dual_memcfg_cn70xx cn78xx;
1657 	struct cvmx_lmcx_dual_memcfg_cn70xx cn78xxp1;
1658 	struct cvmx_lmcx_dual_memcfg_cn61xx cnf71xx;
1659 	struct cvmx_lmcx_dual_memcfg_cn70xx cnf75xx;
1660 };
1661 
1662 /**
1663  * cvmx_lmc#_ecc_parity_test
1664  *
1665  * This register has bits to control the generation of ECC and command
1666  * address parity errors. An ECC error is generated by enabling
1667  * [ECC_CORRUPT_ENA] and selecting the [ECC_CORRUPT_IDX] index of the
1668  * dataword from the cacheline to be corrupted.
1669  * The user selects which bit of the 128-bit dataword to corrupt by
1670  * asserting any of the CHAR_MASK0 and CHAR_MASK2 bits (CHAR_MASK0 and
1671  * CHAR_MASK2 correspond to the lower and upper 64-bit signals that can
1672  * corrupt any individual bit of the data).
1673  *
1674  * Command address parity error is generated by enabling
1675  * [CA_PARITY_CORRUPT_ENA] and selecting the DDR command that the parity
1676  * is to be corrupted with through [CA_PARITY_SEL].
1677  */
1678 union cvmx_lmcx_ecc_parity_test {
1679 	u64 u64;
1680 	struct cvmx_lmcx_ecc_parity_test_s {
1681 		uint64_t reserved_12_63:52;
1682 		uint64_t ecc_corrupt_ena:1;
1683 		uint64_t ecc_corrupt_idx:3;
1684 		uint64_t reserved_6_7:2;
1685 		uint64_t ca_parity_corrupt_ena:1;
1686 		uint64_t ca_parity_sel:5;
1687 	} s;
1688 	struct cvmx_lmcx_ecc_parity_test_s cn73xx;
1689 	struct cvmx_lmcx_ecc_parity_test_s cn78xx;
1690 	struct cvmx_lmcx_ecc_parity_test_s cn78xxp1;
1691 	struct cvmx_lmcx_ecc_parity_test_s cnf75xx;
1692 };
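
/*
 * Illustrative sketch of the ECC error-injection flow described above,
 * not part of the original definitions.  The lmc_wr() accessor is a
 * hypothetical stand-in for the driver's CSR write helper.  The data bit
 * to flip in the lower 64-bit half of the dataword is chosen through
 * LMC()_CHAR_MASK0 (LMC()_CHAR_MASK2 would cover the upper half).
 */
static inline void example_inject_ecc_error(void (*lmc_wr)(uint64_t addr, uint64_t val),
					    unsigned long if_num,
					    unsigned int data_bit,	/* 0..63 */
					    unsigned int dataword_idx)	/* 0..7 */
{
	union cvmx_lmcx_ecc_parity_test test;

	/* Select the data bit to corrupt in the lower 64-bit word. */
	lmc_wr(CVMX_LMCX_CHAR_MASK0(if_num), 1ull << data_bit);

	/* Select which dataword of the cacheline is corrupted and arm it. */
	test.u64 = 0;
	test.s.ecc_corrupt_idx = dataword_idx;
	test.s.ecc_corrupt_ena = 1;
	lmc_wr(CVMX_LMCX_ECC_PARITY_TEST(if_num), test.u64);
}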
1693 
1694 /**
1695  * cvmx_lmc#_ecc_synd
1696  *
1697  * LMC_ECC_SYND = MRD ECC Syndromes
1698  *
1699  */
1700 union cvmx_lmcx_ecc_synd {
1701 	u64 u64;
1702 	struct cvmx_lmcx_ecc_synd_s {
1703 		uint64_t reserved_32_63:32;
1704 		uint64_t mrdsyn3:8;
1705 		uint64_t mrdsyn2:8;
1706 		uint64_t mrdsyn1:8;
1707 		uint64_t mrdsyn0:8;
1708 	} s;
1709 	struct cvmx_lmcx_ecc_synd_s cn30xx;
1710 	struct cvmx_lmcx_ecc_synd_s cn31xx;
1711 	struct cvmx_lmcx_ecc_synd_s cn38xx;
1712 	struct cvmx_lmcx_ecc_synd_s cn38xxp2;
1713 	struct cvmx_lmcx_ecc_synd_s cn50xx;
1714 	struct cvmx_lmcx_ecc_synd_s cn52xx;
1715 	struct cvmx_lmcx_ecc_synd_s cn52xxp1;
1716 	struct cvmx_lmcx_ecc_synd_s cn56xx;
1717 	struct cvmx_lmcx_ecc_synd_s cn56xxp1;
1718 	struct cvmx_lmcx_ecc_synd_s cn58xx;
1719 	struct cvmx_lmcx_ecc_synd_s cn58xxp1;
1720 	struct cvmx_lmcx_ecc_synd_s cn61xx;
1721 	struct cvmx_lmcx_ecc_synd_s cn63xx;
1722 	struct cvmx_lmcx_ecc_synd_s cn63xxp1;
1723 	struct cvmx_lmcx_ecc_synd_s cn66xx;
1724 	struct cvmx_lmcx_ecc_synd_s cn68xx;
1725 	struct cvmx_lmcx_ecc_synd_s cn68xxp1;
1726 	struct cvmx_lmcx_ecc_synd_s cn70xx;
1727 	struct cvmx_lmcx_ecc_synd_s cn70xxp1;
1728 	struct cvmx_lmcx_ecc_synd_s cn73xx;
1729 	struct cvmx_lmcx_ecc_synd_s cn78xx;
1730 	struct cvmx_lmcx_ecc_synd_s cn78xxp1;
1731 	struct cvmx_lmcx_ecc_synd_s cnf71xx;
1732 	struct cvmx_lmcx_ecc_synd_s cnf75xx;
1733 };
1734 
1735 /**
1736  * cvmx_lmc#_ext_config
1737  *
1738  * This register has additional configuration and control bits for the LMC.
1739  *
1740  */
1741 union cvmx_lmcx_ext_config {
1742 	u64 u64;
1743 	struct cvmx_lmcx_ext_config_s {
1744 		uint64_t reserved_61_63:3;
1745 		uint64_t bc4_dqs_ena:1;
1746 		uint64_t ref_block:1;
1747 		uint64_t mrs_side:1;
1748 		uint64_t mrs_one_side:1;
1749 		uint64_t mrs_bside_invert_disable:1;
1750 		uint64_t dimm_sel_invert_off:1;
1751 		uint64_t dimm_sel_force_invert:1;
1752 		uint64_t coalesce_address_mode:1;
1753 		uint64_t dimm1_cid:2;
1754 		uint64_t dimm0_cid:2;
1755 		uint64_t rcd_parity_check:1;
1756 		uint64_t reserved_46_47:2;
1757 		uint64_t error_alert_n_sample:1;
1758 		uint64_t ea_int_polarity:1;
1759 		uint64_t reserved_43_43:1;
1760 		uint64_t par_addr_mask:3;
1761 		uint64_t reserved_38_39:2;
1762 		uint64_t mrs_cmd_override:1;
1763 		uint64_t mrs_cmd_select:1;
1764 		uint64_t reserved_33_35:3;
1765 		uint64_t invert_data:1;
1766 		uint64_t reserved_30_31:2;
1767 		uint64_t cmd_rti:1;
1768 		uint64_t cal_ena:1;
1769 		uint64_t reserved_27_27:1;
1770 		uint64_t par_include_a17:1;
1771 		uint64_t par_include_bg1:1;
1772 		uint64_t gen_par:1;
1773 		uint64_t reserved_21_23:3;
1774 		uint64_t vrefint_seq_deskew:1;
1775 		uint64_t read_ena_bprch:1;
1776 		uint64_t read_ena_fprch:1;
1777 		uint64_t slot_ctl_reset_force:1;
1778 		uint64_t ref_int_lsbs:9;
1779 		uint64_t drive_ena_bprch:1;
1780 		uint64_t drive_ena_fprch:1;
1781 		uint64_t dlcram_flip_synd:2;
1782 		uint64_t dlcram_cor_dis:1;
1783 		uint64_t dlc_nxm_rd:1;
1784 		uint64_t l2c_nxm_rd:1;
1785 		uint64_t l2c_nxm_wr:1;
1786 	} s;
1787 	struct cvmx_lmcx_ext_config_cn70xx {
1788 		uint64_t reserved_21_63:43;
1789 		uint64_t vrefint_seq_deskew:1;
1790 		uint64_t read_ena_bprch:1;
1791 		uint64_t read_ena_fprch:1;
1792 		uint64_t slot_ctl_reset_force:1;
1793 		uint64_t ref_int_lsbs:9;
1794 		uint64_t drive_ena_bprch:1;
1795 		uint64_t drive_ena_fprch:1;
1796 		uint64_t dlcram_flip_synd:2;
1797 		uint64_t dlcram_cor_dis:1;
1798 		uint64_t dlc_nxm_rd:1;
1799 		uint64_t l2c_nxm_rd:1;
1800 		uint64_t l2c_nxm_wr:1;
1801 	} cn70xx;
1802 	struct cvmx_lmcx_ext_config_cn70xx cn70xxp1;
1803 	struct cvmx_lmcx_ext_config_cn73xx {
1804 		uint64_t reserved_60_63:4;
1805 		uint64_t ref_block:1;
1806 		uint64_t mrs_side:1;
1807 		uint64_t mrs_one_side:1;
1808 		uint64_t mrs_bside_invert_disable:1;
1809 		uint64_t dimm_sel_invert_off:1;
1810 		uint64_t dimm_sel_force_invert:1;
1811 		uint64_t coalesce_address_mode:1;
1812 		uint64_t dimm1_cid:2;
1813 		uint64_t dimm0_cid:2;
1814 		uint64_t rcd_parity_check:1;
1815 		uint64_t reserved_46_47:2;
1816 		uint64_t error_alert_n_sample:1;
1817 		uint64_t ea_int_polarity:1;
1818 		uint64_t reserved_43_43:1;
1819 		uint64_t par_addr_mask:3;
1820 		uint64_t reserved_38_39:2;
1821 		uint64_t mrs_cmd_override:1;
1822 		uint64_t mrs_cmd_select:1;
1823 		uint64_t reserved_33_35:3;
1824 		uint64_t invert_data:1;
1825 		uint64_t reserved_30_31:2;
1826 		uint64_t cmd_rti:1;
1827 		uint64_t cal_ena:1;
1828 		uint64_t reserved_27_27:1;
1829 		uint64_t par_include_a17:1;
1830 		uint64_t par_include_bg1:1;
1831 		uint64_t gen_par:1;
1832 		uint64_t reserved_21_23:3;
1833 		uint64_t vrefint_seq_deskew:1;
1834 		uint64_t read_ena_bprch:1;
1835 		uint64_t read_ena_fprch:1;
1836 		uint64_t slot_ctl_reset_force:1;
1837 		uint64_t ref_int_lsbs:9;
1838 		uint64_t drive_ena_bprch:1;
1839 		uint64_t drive_ena_fprch:1;
1840 		uint64_t dlcram_flip_synd:2;
1841 		uint64_t dlcram_cor_dis:1;
1842 		uint64_t dlc_nxm_rd:1;
1843 		uint64_t l2c_nxm_rd:1;
1844 		uint64_t l2c_nxm_wr:1;
1845 	} cn73xx;
1846 	struct cvmx_lmcx_ext_config_s cn78xx;
1847 	struct cvmx_lmcx_ext_config_s cn78xxp1;
1848 	struct cvmx_lmcx_ext_config_cn73xx cnf75xx;
1849 };
1850 
1851 /**
1852  * cvmx_lmc#_ext_config2
1853  *
1854  * This register has additional configuration and control bits for the LMC.
1855  *
1856  */
1857 union cvmx_lmcx_ext_config2 {
1858 	u64 u64;
1859 	struct cvmx_lmcx_ext_config2_s {
1860 		uint64_t reserved_27_63:37;
1861 		uint64_t sref_auto_idle_thres:5;
1862 		uint64_t sref_auto_enable:1;
1863 		uint64_t delay_unload_r3:1;
1864 		uint64_t delay_unload_r2:1;
1865 		uint64_t delay_unload_r1:1;
1866 		uint64_t delay_unload_r0:1;
1867 		uint64_t early_dqx2:1;
1868 		uint64_t xor_bank_sel:4;
1869 		uint64_t reserved_10_11:2;
1870 		uint64_t row_col_switch:1;
1871 		uint64_t trr_on:1;
1872 		uint64_t mac:3;
1873 		uint64_t macram_scrub_done:1;
1874 		uint64_t macram_scrub:1;
1875 		uint64_t macram_flip_synd:2;
1876 		uint64_t macram_cor_dis:1;
1877 	} s;
1878 	struct cvmx_lmcx_ext_config2_cn73xx {
1879 		uint64_t reserved_10_63:54;
1880 		uint64_t row_col_switch:1;
1881 		uint64_t trr_on:1;
1882 		uint64_t mac:3;
1883 		uint64_t macram_scrub_done:1;
1884 		uint64_t macram_scrub:1;
1885 		uint64_t macram_flip_synd:2;
1886 		uint64_t macram_cor_dis:1;
1887 	} cn73xx;
1888 	struct cvmx_lmcx_ext_config2_s cn78xx;
1889 	struct cvmx_lmcx_ext_config2_cnf75xx {
1890 		uint64_t reserved_21_63:43;
1891 		uint64_t delay_unload_r3:1;
1892 		uint64_t delay_unload_r2:1;
1893 		uint64_t delay_unload_r1:1;
1894 		uint64_t delay_unload_r0:1;
1895 		uint64_t early_dqx2:1;
1896 		uint64_t xor_bank_sel:4;
1897 		uint64_t reserved_10_11:2;
1898 		uint64_t row_col_switch:1;
1899 		uint64_t trr_on:1;
1900 		uint64_t mac:3;
1901 		uint64_t macram_scrub_done:1;
1902 		uint64_t macram_scrub:1;
1903 		uint64_t macram_flip_synd:2;
1904 		uint64_t macram_cor_dis:1;
1905 	} cnf75xx;
1906 };
1907 
1908 /**
1909  * cvmx_lmc#_fadr
1910  *
 * This register captures only the first transaction with an ECC error. A
 * DED error can overwrite this register with its failing address if the
 * first error was a SEC. Writing LMC()_INT[SEC_ERR,DED_ERR] clears the
 * error bits and allows the next failing address to be captured. If FDIMM
 * is 1, the error is in the high DIMM.
 *
 * LMC()_FADR captures the failing pre-scrambled address location (split
 * into DIMM, bunk, bank, etc.). If scrambling is off, LMC()_FADR also
 * identifies the failing physical location in the DRAM parts.
 * LMC()_SCRAMBLED_FADR captures the actual failing address location in the
 * physical DRAM parts: if scrambling is on, LMC()_SCRAMBLED_FADR holds the
 * failing physical location in the DRAM parts (split into DIMM, bunk,
 * bank, etc.); if scrambling is off, the pre-scramble and post-scramble
 * addresses are the same, so the contents of LMC()_SCRAMBLED_FADR match
 * the contents of LMC()_FADR.
1925  */
1926 union cvmx_lmcx_fadr {
1927 	u64 u64;
1928 	struct cvmx_lmcx_fadr_s {
1929 		uint64_t reserved_43_63:21;
1930 		uint64_t fcid:3;
1931 		uint64_t fill_order:2;
1932 		uint64_t reserved_0_37:38;
1933 	} s;
1934 	struct cvmx_lmcx_fadr_cn30xx {
1935 		uint64_t reserved_32_63:32;
1936 		uint64_t fdimm:2;
1937 		uint64_t fbunk:1;
1938 		uint64_t fbank:3;
1939 		uint64_t frow:14;
1940 		uint64_t fcol:12;
1941 	} cn30xx;
1942 	struct cvmx_lmcx_fadr_cn30xx cn31xx;
1943 	struct cvmx_lmcx_fadr_cn30xx cn38xx;
1944 	struct cvmx_lmcx_fadr_cn30xx cn38xxp2;
1945 	struct cvmx_lmcx_fadr_cn30xx cn50xx;
1946 	struct cvmx_lmcx_fadr_cn30xx cn52xx;
1947 	struct cvmx_lmcx_fadr_cn30xx cn52xxp1;
1948 	struct cvmx_lmcx_fadr_cn30xx cn56xx;
1949 	struct cvmx_lmcx_fadr_cn30xx cn56xxp1;
1950 	struct cvmx_lmcx_fadr_cn30xx cn58xx;
1951 	struct cvmx_lmcx_fadr_cn30xx cn58xxp1;
1952 	struct cvmx_lmcx_fadr_cn61xx {
1953 		uint64_t reserved_36_63:28;
1954 		uint64_t fdimm:2;
1955 		uint64_t fbunk:1;
1956 		uint64_t fbank:3;
1957 		uint64_t frow:16;
1958 		uint64_t fcol:14;
1959 	} cn61xx;
1960 	struct cvmx_lmcx_fadr_cn61xx cn63xx;
1961 	struct cvmx_lmcx_fadr_cn61xx cn63xxp1;
1962 	struct cvmx_lmcx_fadr_cn61xx cn66xx;
1963 	struct cvmx_lmcx_fadr_cn61xx cn68xx;
1964 	struct cvmx_lmcx_fadr_cn61xx cn68xxp1;
1965 	struct cvmx_lmcx_fadr_cn70xx {
1966 		uint64_t reserved_40_63:24;
1967 		uint64_t fill_order:2;
1968 		uint64_t fdimm:1;
1969 		uint64_t fbunk:1;
1970 		uint64_t fbank:4;
1971 		uint64_t frow:18;
1972 		uint64_t fcol:14;
1973 	} cn70xx;
1974 	struct cvmx_lmcx_fadr_cn70xx cn70xxp1;
1975 	struct cvmx_lmcx_fadr_cn73xx {
1976 		uint64_t reserved_43_63:21;
1977 		uint64_t fcid:3;
1978 		uint64_t fill_order:2;
1979 		uint64_t fdimm:1;
1980 		uint64_t fbunk:1;
1981 		uint64_t fbank:4;
1982 		uint64_t frow:18;
1983 		uint64_t fcol:14;
1984 	} cn73xx;
1985 	struct cvmx_lmcx_fadr_cn73xx cn78xx;
1986 	struct cvmx_lmcx_fadr_cn73xx cn78xxp1;
1987 	struct cvmx_lmcx_fadr_cn61xx cnf71xx;
1988 	struct cvmx_lmcx_fadr_cn73xx cnf75xx;
1989 };
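
/*
 * Pure decode sketch (an addition for illustration): split a raw
 * LMC()_FADR value into its failing DIMM/bank/row/column fields using the
 * CN73XX field layout above. How the raw value is read from the CSR is
 * left to the caller; nothing here touches hardware.
 */
static inline void cvmx_lmcx_fadr_decode_cn73xx_sketch(u64 raw, unsigned int *fdimm,
						       unsigned int *fbank,
						       unsigned int *frow,
						       unsigned int *fcol)
{
	union cvmx_lmcx_fadr fadr;

	fadr.u64 = raw;
	*fdimm = fadr.cn73xx.fdimm;	/* failing DIMM */
	*fbank = fadr.cn73xx.fbank;	/* failing bank */
	*frow  = fadr.cn73xx.frow;	/* failing row address */
	*fcol  = fadr.cn73xx.fcol;	/* failing column address */
}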
1990 
1991 /**
1992  * cvmx_lmc#_general_purpose0
1993  */
1994 union cvmx_lmcx_general_purpose0 {
1995 	u64 u64;
1996 	struct cvmx_lmcx_general_purpose0_s {
1997 		uint64_t data:64;
1998 	} s;
1999 	struct cvmx_lmcx_general_purpose0_s cn73xx;
2000 	struct cvmx_lmcx_general_purpose0_s cn78xx;
2001 	struct cvmx_lmcx_general_purpose0_s cnf75xx;
2002 };
2003 
2004 /**
2005  * cvmx_lmc#_general_purpose1
2006  */
2007 union cvmx_lmcx_general_purpose1 {
2008 	u64 u64;
2009 	struct cvmx_lmcx_general_purpose1_s {
2010 		uint64_t data:64;
2011 	} s;
2012 	struct cvmx_lmcx_general_purpose1_s cn73xx;
2013 	struct cvmx_lmcx_general_purpose1_s cn78xx;
2014 	struct cvmx_lmcx_general_purpose1_s cnf75xx;
2015 };
2016 
2017 /**
2018  * cvmx_lmc#_general_purpose2
2019  */
2020 union cvmx_lmcx_general_purpose2 {
2021 	u64 u64;
2022 	struct cvmx_lmcx_general_purpose2_s {
2023 		uint64_t reserved_16_63:48;
2024 		uint64_t data:16;
2025 	} s;
2026 	struct cvmx_lmcx_general_purpose2_s cn73xx;
2027 	struct cvmx_lmcx_general_purpose2_s cn78xx;
2028 	struct cvmx_lmcx_general_purpose2_s cnf75xx;
2029 };
2030 
2031 /**
2032  * cvmx_lmc#_ifb_cnt
2033  *
2034  * LMC_IFB_CNT  = Performance Counters
2035  *
2036  */
2037 union cvmx_lmcx_ifb_cnt {
2038 	u64 u64;
2039 	struct cvmx_lmcx_ifb_cnt_s {
2040 		uint64_t ifbcnt:64;
2041 	} s;
2042 	struct cvmx_lmcx_ifb_cnt_s cn61xx;
2043 	struct cvmx_lmcx_ifb_cnt_s cn63xx;
2044 	struct cvmx_lmcx_ifb_cnt_s cn63xxp1;
2045 	struct cvmx_lmcx_ifb_cnt_s cn66xx;
2046 	struct cvmx_lmcx_ifb_cnt_s cn68xx;
2047 	struct cvmx_lmcx_ifb_cnt_s cn68xxp1;
2048 	struct cvmx_lmcx_ifb_cnt_s cn70xx;
2049 	struct cvmx_lmcx_ifb_cnt_s cn70xxp1;
2050 	struct cvmx_lmcx_ifb_cnt_s cn73xx;
2051 	struct cvmx_lmcx_ifb_cnt_s cn78xx;
2052 	struct cvmx_lmcx_ifb_cnt_s cn78xxp1;
2053 	struct cvmx_lmcx_ifb_cnt_s cnf71xx;
2054 	struct cvmx_lmcx_ifb_cnt_s cnf75xx;
2055 };
2056 
2057 /**
2058  * cvmx_lmc#_ifb_cnt_hi
2059  *
2060  * LMC_IFB_CNT_HI  = Performance Counters
2061  *
2062  */
2063 union cvmx_lmcx_ifb_cnt_hi {
2064 	u64 u64;
2065 	struct cvmx_lmcx_ifb_cnt_hi_s {
2066 		uint64_t reserved_32_63:32;
2067 		uint64_t ifbcnt_hi:32;
2068 	} s;
2069 	struct cvmx_lmcx_ifb_cnt_hi_s cn30xx;
2070 	struct cvmx_lmcx_ifb_cnt_hi_s cn31xx;
2071 	struct cvmx_lmcx_ifb_cnt_hi_s cn38xx;
2072 	struct cvmx_lmcx_ifb_cnt_hi_s cn38xxp2;
2073 	struct cvmx_lmcx_ifb_cnt_hi_s cn50xx;
2074 	struct cvmx_lmcx_ifb_cnt_hi_s cn52xx;
2075 	struct cvmx_lmcx_ifb_cnt_hi_s cn52xxp1;
2076 	struct cvmx_lmcx_ifb_cnt_hi_s cn56xx;
2077 	struct cvmx_lmcx_ifb_cnt_hi_s cn56xxp1;
2078 	struct cvmx_lmcx_ifb_cnt_hi_s cn58xx;
2079 	struct cvmx_lmcx_ifb_cnt_hi_s cn58xxp1;
2080 };
2081 
2082 /**
2083  * cvmx_lmc#_ifb_cnt_lo
2084  *
2085  * LMC_IFB_CNT_LO  = Performance Counters
2086  *
2087  */
2088 union cvmx_lmcx_ifb_cnt_lo {
2089 	u64 u64;
2090 	struct cvmx_lmcx_ifb_cnt_lo_s {
2091 		uint64_t reserved_32_63:32;
2092 		uint64_t ifbcnt_lo:32;
2093 	} s;
2094 	struct cvmx_lmcx_ifb_cnt_lo_s cn30xx;
2095 	struct cvmx_lmcx_ifb_cnt_lo_s cn31xx;
2096 	struct cvmx_lmcx_ifb_cnt_lo_s cn38xx;
2097 	struct cvmx_lmcx_ifb_cnt_lo_s cn38xxp2;
2098 	struct cvmx_lmcx_ifb_cnt_lo_s cn50xx;
2099 	struct cvmx_lmcx_ifb_cnt_lo_s cn52xx;
2100 	struct cvmx_lmcx_ifb_cnt_lo_s cn52xxp1;
2101 	struct cvmx_lmcx_ifb_cnt_lo_s cn56xx;
2102 	struct cvmx_lmcx_ifb_cnt_lo_s cn56xxp1;
2103 	struct cvmx_lmcx_ifb_cnt_lo_s cn58xx;
2104 	struct cvmx_lmcx_ifb_cnt_lo_s cn58xxp1;
2105 };
2106 
2107 /**
2108  * cvmx_lmc#_int
2109  *
2110  * This register contains the different interrupt-summary bits of the LMC.
2111  *
2112  */
2113 union cvmx_lmcx_int {
2114 	u64 u64;
2115 	struct cvmx_lmcx_int_s {
2116 		uint64_t reserved_14_63:50;
2117 		uint64_t macram_ded_err:1;
2118 		uint64_t macram_sec_err:1;
2119 		uint64_t ddr_err:1;
2120 		uint64_t dlcram_ded_err:1;
2121 		uint64_t dlcram_sec_err:1;
2122 		uint64_t ded_err:4;
2123 		uint64_t sec_err:4;
2124 		uint64_t nxm_wr_err:1;
2125 	} s;
2126 	struct cvmx_lmcx_int_cn61xx {
2127 		uint64_t reserved_9_63:55;
2128 		uint64_t ded_err:4;
2129 		uint64_t sec_err:4;
2130 		uint64_t nxm_wr_err:1;
2131 	} cn61xx;
2132 	struct cvmx_lmcx_int_cn61xx cn63xx;
2133 	struct cvmx_lmcx_int_cn61xx cn63xxp1;
2134 	struct cvmx_lmcx_int_cn61xx cn66xx;
2135 	struct cvmx_lmcx_int_cn61xx cn68xx;
2136 	struct cvmx_lmcx_int_cn61xx cn68xxp1;
2137 	struct cvmx_lmcx_int_cn70xx {
2138 		uint64_t reserved_12_63:52;
2139 		uint64_t ddr_err:1;
2140 		uint64_t dlcram_ded_err:1;
2141 		uint64_t dlcram_sec_err:1;
2142 		uint64_t ded_err:4;
2143 		uint64_t sec_err:4;
2144 		uint64_t nxm_wr_err:1;
2145 	} cn70xx;
2146 	struct cvmx_lmcx_int_cn70xx cn70xxp1;
2147 	struct cvmx_lmcx_int_s cn73xx;
2148 	struct cvmx_lmcx_int_s cn78xx;
2149 	struct cvmx_lmcx_int_s cn78xxp1;
2150 	struct cvmx_lmcx_int_cn61xx cnf71xx;
2151 	struct cvmx_lmcx_int_s cnf75xx;
2152 };
2153 
2154 /**
2155  * cvmx_lmc#_int_en
2156  *
2157  * Unused CSR in O75.
2158  *
2159  */
2160 union cvmx_lmcx_int_en {
2161 	u64 u64;
2162 	struct cvmx_lmcx_int_en_s {
2163 		uint64_t reserved_6_63:58;
2164 		uint64_t ddr_error_alert_ena:1;
2165 		uint64_t dlcram_ded_ena:1;
2166 		uint64_t dlcram_sec_ena:1;
2167 		uint64_t intr_ded_ena:1;
2168 		uint64_t intr_sec_ena:1;
2169 		uint64_t intr_nxm_wr_ena:1;
2170 	} s;
2171 	struct cvmx_lmcx_int_en_cn61xx {
2172 		uint64_t reserved_3_63:61;
2173 		uint64_t intr_ded_ena:1;
2174 		uint64_t intr_sec_ena:1;
2175 		uint64_t intr_nxm_wr_ena:1;
2176 	} cn61xx;
2177 	struct cvmx_lmcx_int_en_cn61xx cn63xx;
2178 	struct cvmx_lmcx_int_en_cn61xx cn63xxp1;
2179 	struct cvmx_lmcx_int_en_cn61xx cn66xx;
2180 	struct cvmx_lmcx_int_en_cn61xx cn68xx;
2181 	struct cvmx_lmcx_int_en_cn61xx cn68xxp1;
2182 	struct cvmx_lmcx_int_en_s cn70xx;
2183 	struct cvmx_lmcx_int_en_s cn70xxp1;
2184 	struct cvmx_lmcx_int_en_s cn73xx;
2185 	struct cvmx_lmcx_int_en_s cn78xx;
2186 	struct cvmx_lmcx_int_en_s cn78xxp1;
2187 	struct cvmx_lmcx_int_en_cn61xx cnf71xx;
2188 	struct cvmx_lmcx_int_en_s cnf75xx;
2189 };
2190 
2191 /**
2192  * cvmx_lmc#_lane#_crc_swiz
2193  *
2194  * This register contains the CRC bit swizzle for even and odd ranks.
2195  *
2196  */
2197 union cvmx_lmcx_lanex_crc_swiz {
2198 	u64 u64;
2199 	struct cvmx_lmcx_lanex_crc_swiz_s {
2200 		uint64_t reserved_56_63:8;
2201 		uint64_t r1_swiz7:3;
2202 		uint64_t r1_swiz6:3;
2203 		uint64_t r1_swiz5:3;
2204 		uint64_t r1_swiz4:3;
2205 		uint64_t r1_swiz3:3;
2206 		uint64_t r1_swiz2:3;
2207 		uint64_t r1_swiz1:3;
2208 		uint64_t r1_swiz0:3;
2209 		uint64_t reserved_24_31:8;
2210 		uint64_t r0_swiz7:3;
2211 		uint64_t r0_swiz6:3;
2212 		uint64_t r0_swiz5:3;
2213 		uint64_t r0_swiz4:3;
2214 		uint64_t r0_swiz3:3;
2215 		uint64_t r0_swiz2:3;
2216 		uint64_t r0_swiz1:3;
2217 		uint64_t r0_swiz0:3;
2218 	} s;
2219 	struct cvmx_lmcx_lanex_crc_swiz_s cn73xx;
2220 	struct cvmx_lmcx_lanex_crc_swiz_s cn78xx;
2221 	struct cvmx_lmcx_lanex_crc_swiz_s cn78xxp1;
2222 	struct cvmx_lmcx_lanex_crc_swiz_s cnf75xx;
2223 };
2224 
2225 /**
2226  * cvmx_lmc#_mem_cfg0
2227  *
 * Specify the RSL base addresses for the block.
 *
 * LMC_MEM_CFG0 = LMC Memory Configuration Register 0
 *
 * This register controls certain parameters of the memory configuration.
2233  */
2234 union cvmx_lmcx_mem_cfg0 {
2235 	u64 u64;
2236 	struct cvmx_lmcx_mem_cfg0_s {
2237 		uint64_t reserved_32_63:32;
2238 		uint64_t reset:1;
2239 		uint64_t silo_qc:1;
2240 		uint64_t bunk_ena:1;
2241 		uint64_t ded_err:4;
2242 		uint64_t sec_err:4;
2243 		uint64_t intr_ded_ena:1;
2244 		uint64_t intr_sec_ena:1;
2245 		uint64_t tcl:4;
2246 		uint64_t ref_int:6;
2247 		uint64_t pbank_lsb:4;
2248 		uint64_t row_lsb:3;
2249 		uint64_t ecc_ena:1;
2250 		uint64_t init_start:1;
2251 	} s;
2252 	struct cvmx_lmcx_mem_cfg0_s cn30xx;
2253 	struct cvmx_lmcx_mem_cfg0_s cn31xx;
2254 	struct cvmx_lmcx_mem_cfg0_s cn38xx;
2255 	struct cvmx_lmcx_mem_cfg0_s cn38xxp2;
2256 	struct cvmx_lmcx_mem_cfg0_s cn50xx;
2257 	struct cvmx_lmcx_mem_cfg0_s cn52xx;
2258 	struct cvmx_lmcx_mem_cfg0_s cn52xxp1;
2259 	struct cvmx_lmcx_mem_cfg0_s cn56xx;
2260 	struct cvmx_lmcx_mem_cfg0_s cn56xxp1;
2261 	struct cvmx_lmcx_mem_cfg0_s cn58xx;
2262 	struct cvmx_lmcx_mem_cfg0_s cn58xxp1;
2263 };
2264 
2265 /**
2266  * cvmx_lmc#_mem_cfg1
2267  *
2268  * LMC_MEM_CFG1 = LMC Memory Configuration Register1
2269  *
2270  * This register controls the External Memory Configuration Timing Parameters.
2271  * Please refer to the appropriate DDR part spec from your memory vendor for
2272  * the various values in this CSR. The details of each of these timing
2273  * parameters can be found in the JEDEC spec or the vendor spec of the
2274  * memory parts.
2275  */
2276 union cvmx_lmcx_mem_cfg1 {
2277 	u64 u64;
2278 	struct cvmx_lmcx_mem_cfg1_s {
2279 		uint64_t reserved_32_63:32;
2280 		uint64_t comp_bypass:1;
2281 		uint64_t trrd:3;
2282 		uint64_t caslat:3;
2283 		uint64_t tmrd:3;
2284 		uint64_t trfc:5;
2285 		uint64_t trp:4;
2286 		uint64_t twtr:4;
2287 		uint64_t trcd:4;
2288 		uint64_t tras:5;
2289 	} s;
2290 	struct cvmx_lmcx_mem_cfg1_s cn30xx;
2291 	struct cvmx_lmcx_mem_cfg1_s cn31xx;
2292 	struct cvmx_lmcx_mem_cfg1_cn38xx {
2293 		uint64_t reserved_31_63:33;
2294 		uint64_t trrd:3;
2295 		uint64_t caslat:3;
2296 		uint64_t tmrd:3;
2297 		uint64_t trfc:5;
2298 		uint64_t trp:4;
2299 		uint64_t twtr:4;
2300 		uint64_t trcd:4;
2301 		uint64_t tras:5;
2302 	} cn38xx;
2303 	struct cvmx_lmcx_mem_cfg1_cn38xx cn38xxp2;
2304 	struct cvmx_lmcx_mem_cfg1_s cn50xx;
2305 	struct cvmx_lmcx_mem_cfg1_cn38xx cn52xx;
2306 	struct cvmx_lmcx_mem_cfg1_cn38xx cn52xxp1;
2307 	struct cvmx_lmcx_mem_cfg1_cn38xx cn56xx;
2308 	struct cvmx_lmcx_mem_cfg1_cn38xx cn56xxp1;
2309 	struct cvmx_lmcx_mem_cfg1_cn38xx cn58xx;
2310 	struct cvmx_lmcx_mem_cfg1_cn38xx cn58xxp1;
2311 };
2312 
2313 /**
2314  * cvmx_lmc#_modereg_params0
2315  *
2316  * These parameters are written into the DDR3/DDR4 MR0, MR1, MR2 and MR3
2317  * registers.
2318  *
2319  */
2320 union cvmx_lmcx_modereg_params0 {
2321 	u64 u64;
2322 	struct cvmx_lmcx_modereg_params0_s {
2323 		uint64_t reserved_28_63:36;
2324 		uint64_t wrp_ext:1;
2325 		uint64_t cl_ext:1;
2326 		uint64_t al_ext:1;
2327 		uint64_t ppd:1;
2328 		uint64_t wrp:3;
2329 		uint64_t dllr:1;
2330 		uint64_t tm:1;
2331 		uint64_t rbt:1;
2332 		uint64_t cl:4;
2333 		uint64_t bl:2;
2334 		uint64_t qoff:1;
2335 		uint64_t tdqs:1;
2336 		uint64_t wlev:1;
2337 		uint64_t al:2;
2338 		uint64_t dll:1;
2339 		uint64_t mpr:1;
2340 		uint64_t mprloc:2;
2341 		uint64_t cwl:3;
2342 	} s;
2343 	struct cvmx_lmcx_modereg_params0_cn61xx {
2344 		uint64_t reserved_25_63:39;
2345 		uint64_t ppd:1;
2346 		uint64_t wrp:3;
2347 		uint64_t dllr:1;
2348 		uint64_t tm:1;
2349 		uint64_t rbt:1;
2350 		uint64_t cl:4;
2351 		uint64_t bl:2;
2352 		uint64_t qoff:1;
2353 		uint64_t tdqs:1;
2354 		uint64_t wlev:1;
2355 		uint64_t al:2;
2356 		uint64_t dll:1;
2357 		uint64_t mpr:1;
2358 		uint64_t mprloc:2;
2359 		uint64_t cwl:3;
2360 	} cn61xx;
2361 	struct cvmx_lmcx_modereg_params0_cn61xx cn63xx;
2362 	struct cvmx_lmcx_modereg_params0_cn61xx cn63xxp1;
2363 	struct cvmx_lmcx_modereg_params0_cn61xx cn66xx;
2364 	struct cvmx_lmcx_modereg_params0_cn61xx cn68xx;
2365 	struct cvmx_lmcx_modereg_params0_cn61xx cn68xxp1;
2366 	struct cvmx_lmcx_modereg_params0_cn61xx cn70xx;
2367 	struct cvmx_lmcx_modereg_params0_cn61xx cn70xxp1;
2368 	struct cvmx_lmcx_modereg_params0_s cn73xx;
2369 	struct cvmx_lmcx_modereg_params0_s cn78xx;
2370 	struct cvmx_lmcx_modereg_params0_s cn78xxp1;
2371 	struct cvmx_lmcx_modereg_params0_cn61xx cnf71xx;
2372 	struct cvmx_lmcx_modereg_params0_s cnf75xx;
2373 };
2374 
2375 /**
2376  * cvmx_lmc#_modereg_params1
2377  *
2378  * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
2379  *
2380  */
2381 union cvmx_lmcx_modereg_params1 {
2382 	u64 u64;
2383 	struct cvmx_lmcx_modereg_params1_s {
2384 		uint64_t reserved_55_63:9;
2385 		uint64_t rtt_wr_11_ext:1;
2386 		uint64_t rtt_wr_10_ext:1;
2387 		uint64_t rtt_wr_01_ext:1;
2388 		uint64_t rtt_wr_00_ext:1;
2389 		uint64_t db_output_impedance:3;
2390 		uint64_t rtt_nom_11:3;
2391 		uint64_t dic_11:2;
2392 		uint64_t rtt_wr_11:2;
2393 		uint64_t srt_11:1;
2394 		uint64_t asr_11:1;
2395 		uint64_t pasr_11:3;
2396 		uint64_t rtt_nom_10:3;
2397 		uint64_t dic_10:2;
2398 		uint64_t rtt_wr_10:2;
2399 		uint64_t srt_10:1;
2400 		uint64_t asr_10:1;
2401 		uint64_t pasr_10:3;
2402 		uint64_t rtt_nom_01:3;
2403 		uint64_t dic_01:2;
2404 		uint64_t rtt_wr_01:2;
2405 		uint64_t srt_01:1;
2406 		uint64_t asr_01:1;
2407 		uint64_t pasr_01:3;
2408 		uint64_t rtt_nom_00:3;
2409 		uint64_t dic_00:2;
2410 		uint64_t rtt_wr_00:2;
2411 		uint64_t srt_00:1;
2412 		uint64_t asr_00:1;
2413 		uint64_t pasr_00:3;
2414 	} s;
2415 	struct cvmx_lmcx_modereg_params1_cn61xx {
2416 		uint64_t reserved_48_63:16;
2417 		uint64_t rtt_nom_11:3;
2418 		uint64_t dic_11:2;
2419 		uint64_t rtt_wr_11:2;
2420 		uint64_t srt_11:1;
2421 		uint64_t asr_11:1;
2422 		uint64_t pasr_11:3;
2423 		uint64_t rtt_nom_10:3;
2424 		uint64_t dic_10:2;
2425 		uint64_t rtt_wr_10:2;
2426 		uint64_t srt_10:1;
2427 		uint64_t asr_10:1;
2428 		uint64_t pasr_10:3;
2429 		uint64_t rtt_nom_01:3;
2430 		uint64_t dic_01:2;
2431 		uint64_t rtt_wr_01:2;
2432 		uint64_t srt_01:1;
2433 		uint64_t asr_01:1;
2434 		uint64_t pasr_01:3;
2435 		uint64_t rtt_nom_00:3;
2436 		uint64_t dic_00:2;
2437 		uint64_t rtt_wr_00:2;
2438 		uint64_t srt_00:1;
2439 		uint64_t asr_00:1;
2440 		uint64_t pasr_00:3;
2441 	} cn61xx;
2442 	struct cvmx_lmcx_modereg_params1_cn61xx cn63xx;
2443 	struct cvmx_lmcx_modereg_params1_cn61xx cn63xxp1;
2444 	struct cvmx_lmcx_modereg_params1_cn61xx cn66xx;
2445 	struct cvmx_lmcx_modereg_params1_cn61xx cn68xx;
2446 	struct cvmx_lmcx_modereg_params1_cn61xx cn68xxp1;
2447 	struct cvmx_lmcx_modereg_params1_cn61xx cn70xx;
2448 	struct cvmx_lmcx_modereg_params1_cn61xx cn70xxp1;
2449 	struct cvmx_lmcx_modereg_params1_s cn73xx;
2450 	struct cvmx_lmcx_modereg_params1_s cn78xx;
2451 	struct cvmx_lmcx_modereg_params1_s cn78xxp1;
2452 	struct cvmx_lmcx_modereg_params1_cn61xx cnf71xx;
2453 	struct cvmx_lmcx_modereg_params1_s cnf75xx;
2454 };
2455 
2456 /**
2457  * cvmx_lmc#_modereg_params2
2458  *
2459  * These parameters are written into the DDR4 mode registers.
2460  *
2461  */
2462 union cvmx_lmcx_modereg_params2 {
2463 	u64 u64;
2464 	struct cvmx_lmcx_modereg_params2_s {
2465 		uint64_t reserved_41_63:23;
2466 		uint64_t vrefdq_train_en:1;
2467 		uint64_t vref_range_11:1;
2468 		uint64_t vref_value_11:6;
2469 		uint64_t rtt_park_11:3;
2470 		uint64_t vref_range_10:1;
2471 		uint64_t vref_value_10:6;
2472 		uint64_t rtt_park_10:3;
2473 		uint64_t vref_range_01:1;
2474 		uint64_t vref_value_01:6;
2475 		uint64_t rtt_park_01:3;
2476 		uint64_t vref_range_00:1;
2477 		uint64_t vref_value_00:6;
2478 		uint64_t rtt_park_00:3;
2479 	} s;
2480 	struct cvmx_lmcx_modereg_params2_s cn70xx;
2481 	struct cvmx_lmcx_modereg_params2_cn70xxp1 {
2482 		uint64_t reserved_40_63:24;
2483 		uint64_t vref_range_11:1;
2484 		uint64_t vref_value_11:6;
2485 		uint64_t rtt_park_11:3;
2486 		uint64_t vref_range_10:1;
2487 		uint64_t vref_value_10:6;
2488 		uint64_t rtt_park_10:3;
2489 		uint64_t vref_range_01:1;
2490 		uint64_t vref_value_01:6;
2491 		uint64_t rtt_park_01:3;
2492 		uint64_t vref_range_00:1;
2493 		uint64_t vref_value_00:6;
2494 		uint64_t rtt_park_00:3;
2495 	} cn70xxp1;
2496 	struct cvmx_lmcx_modereg_params2_s cn73xx;
2497 	struct cvmx_lmcx_modereg_params2_s cn78xx;
2498 	struct cvmx_lmcx_modereg_params2_s cn78xxp1;
2499 	struct cvmx_lmcx_modereg_params2_s cnf75xx;
2500 };
2501 
2502 /**
2503  * cvmx_lmc#_modereg_params3
2504  *
2505  * These parameters are written into the DDR4 mode registers.
2506  *
2507  */
2508 union cvmx_lmcx_modereg_params3 {
2509 	u64 u64;
2510 	struct cvmx_lmcx_modereg_params3_s {
2511 		uint64_t reserved_39_63:25;
2512 		uint64_t xrank_add_tccd_l:3;
2513 		uint64_t xrank_add_tccd_s:3;
2514 		uint64_t mpr_fmt:2;
2515 		uint64_t wr_cmd_lat:2;
2516 		uint64_t fgrm:3;
2517 		uint64_t temp_sense:1;
2518 		uint64_t pda:1;
2519 		uint64_t gd:1;
2520 		uint64_t crc:1;
2521 		uint64_t lpasr:2;
2522 		uint64_t tccd_l:3;
2523 		uint64_t rd_dbi:1;
2524 		uint64_t wr_dbi:1;
2525 		uint64_t dm:1;
2526 		uint64_t ca_par_pers:1;
2527 		uint64_t odt_pd:1;
2528 		uint64_t par_lat_mode:3;
2529 		uint64_t wr_preamble:1;
2530 		uint64_t rd_preamble:1;
2531 		uint64_t sre_abort:1;
2532 		uint64_t cal:3;
2533 		uint64_t vref_mon:1;
2534 		uint64_t tc_ref:1;
2535 		uint64_t max_pd:1;
2536 	} s;
2537 	struct cvmx_lmcx_modereg_params3_cn70xx {
2538 		uint64_t reserved_33_63:31;
2539 		uint64_t mpr_fmt:2;
2540 		uint64_t wr_cmd_lat:2;
2541 		uint64_t fgrm:3;
2542 		uint64_t temp_sense:1;
2543 		uint64_t pda:1;
2544 		uint64_t gd:1;
2545 		uint64_t crc:1;
2546 		uint64_t lpasr:2;
2547 		uint64_t tccd_l:3;
2548 		uint64_t rd_dbi:1;
2549 		uint64_t wr_dbi:1;
2550 		uint64_t dm:1;
2551 		uint64_t ca_par_pers:1;
2552 		uint64_t odt_pd:1;
2553 		uint64_t par_lat_mode:3;
2554 		uint64_t wr_preamble:1;
2555 		uint64_t rd_preamble:1;
2556 		uint64_t sre_abort:1;
2557 		uint64_t cal:3;
2558 		uint64_t vref_mon:1;
2559 		uint64_t tc_ref:1;
2560 		uint64_t max_pd:1;
2561 	} cn70xx;
2562 	struct cvmx_lmcx_modereg_params3_cn70xx cn70xxp1;
2563 	struct cvmx_lmcx_modereg_params3_s cn73xx;
2564 	struct cvmx_lmcx_modereg_params3_s cn78xx;
2565 	struct cvmx_lmcx_modereg_params3_s cn78xxp1;
2566 	struct cvmx_lmcx_modereg_params3_s cnf75xx;
2567 };
2568 
2569 /**
2570  * cvmx_lmc#_mpr_data0
2571  *
2572  * This register provides bits <63:0> of MPR data register.
2573  *
2574  */
2575 union cvmx_lmcx_mpr_data0 {
2576 	u64 u64;
2577 	struct cvmx_lmcx_mpr_data0_s {
2578 		uint64_t mpr_data:64;
2579 	} s;
2580 	struct cvmx_lmcx_mpr_data0_s cn70xx;
2581 	struct cvmx_lmcx_mpr_data0_s cn70xxp1;
2582 	struct cvmx_lmcx_mpr_data0_s cn73xx;
2583 	struct cvmx_lmcx_mpr_data0_s cn78xx;
2584 	struct cvmx_lmcx_mpr_data0_s cn78xxp1;
2585 	struct cvmx_lmcx_mpr_data0_s cnf75xx;
2586 };
2587 
2588 /**
2589  * cvmx_lmc#_mpr_data1
2590  *
2591  * This register provides bits <127:64> of MPR data register.
2592  *
2593  */
2594 union cvmx_lmcx_mpr_data1 {
2595 	u64 u64;
2596 	struct cvmx_lmcx_mpr_data1_s {
2597 		uint64_t mpr_data:64;
2598 	} s;
2599 	struct cvmx_lmcx_mpr_data1_s cn70xx;
2600 	struct cvmx_lmcx_mpr_data1_s cn70xxp1;
2601 	struct cvmx_lmcx_mpr_data1_s cn73xx;
2602 	struct cvmx_lmcx_mpr_data1_s cn78xx;
2603 	struct cvmx_lmcx_mpr_data1_s cn78xxp1;
2604 	struct cvmx_lmcx_mpr_data1_s cnf75xx;
2605 };
2606 
2607 /**
2608  * cvmx_lmc#_mpr_data2
2609  *
2610  * This register provides bits <143:128> of MPR data register.
2611  *
2612  */
2613 union cvmx_lmcx_mpr_data2 {
2614 	u64 u64;
2615 	struct cvmx_lmcx_mpr_data2_s {
2616 		uint64_t reserved_16_63:48;
2617 		uint64_t mpr_data:16;
2618 	} s;
2619 	struct cvmx_lmcx_mpr_data2_s cn70xx;
2620 	struct cvmx_lmcx_mpr_data2_s cn70xxp1;
2621 	struct cvmx_lmcx_mpr_data2_s cn73xx;
2622 	struct cvmx_lmcx_mpr_data2_s cn78xx;
2623 	struct cvmx_lmcx_mpr_data2_s cn78xxp1;
2624 	struct cvmx_lmcx_mpr_data2_s cnf75xx;
2625 };
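
/*
 * Gather sketch (an addition for illustration): the 144 bits of MPR read
 * data are spread across the three registers above as <63:0>, <127:64> and
 * <143:128>. The rd64 accessor and the three register addresses are
 * assumptions supplied by the caller; only the field layouts above are used.
 */
static inline void cvmx_lmcx_mpr_data_gather_sketch(u64 (*rd64)(u64 addr),
						    u64 data0_addr, u64 data1_addr,
						    u64 data2_addr, u64 out[3])
{
	union cvmx_lmcx_mpr_data2 d2;

	out[0] = rd64(data0_addr);	/* MPR data bits <63:0>   */
	out[1] = rd64(data1_addr);	/* MPR data bits <127:64> */
	d2.u64 = rd64(data2_addr);
	out[2] = d2.s.mpr_data;		/* MPR data bits <143:128> */
}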
2626 
2627 /**
2628  * cvmx_lmc#_mr_mpr_ctl
2629  *
2630  * This register provides the control functions when programming the MPR
2631  * of DDR4 DRAMs.
2632  *
2633  */
2634 union cvmx_lmcx_mr_mpr_ctl {
2635 	u64 u64;
2636 	struct cvmx_lmcx_mr_mpr_ctl_s {
2637 		uint64_t reserved_61_63:3;
2638 		uint64_t mr_wr_secure_key_ena:1;
2639 		uint64_t pba_func_space:3;
2640 		uint64_t mr_wr_bg1:1;
2641 		uint64_t mpr_sample_dq_enable:1;
2642 		uint64_t pda_early_dqx:1;
2643 		uint64_t mr_wr_pba_enable:1;
2644 		uint64_t mr_wr_use_default_value:1;
2645 		uint64_t mpr_whole_byte_enable:1;
2646 		uint64_t mpr_byte_select:4;
2647 		uint64_t mpr_bit_select:2;
2648 		uint64_t mpr_wr:1;
2649 		uint64_t mpr_loc:2;
2650 		uint64_t mr_wr_pda_enable:1;
2651 		uint64_t mr_wr_pda_mask:18;
2652 		uint64_t mr_wr_rank:2;
2653 		uint64_t mr_wr_sel:3;
2654 		uint64_t mr_wr_addr:18;
2655 	} s;
2656 	struct cvmx_lmcx_mr_mpr_ctl_cn70xx {
2657 		uint64_t reserved_52_63:12;
2658 		uint64_t mpr_whole_byte_enable:1;
2659 		uint64_t mpr_byte_select:4;
2660 		uint64_t mpr_bit_select:2;
2661 		uint64_t mpr_wr:1;
2662 		uint64_t mpr_loc:2;
2663 		uint64_t mr_wr_pda_enable:1;
2664 		uint64_t mr_wr_pda_mask:18;
2665 		uint64_t mr_wr_rank:2;
2666 		uint64_t mr_wr_sel:3;
2667 		uint64_t mr_wr_addr:18;
2668 	} cn70xx;
2669 	struct cvmx_lmcx_mr_mpr_ctl_cn70xx cn70xxp1;
2670 	struct cvmx_lmcx_mr_mpr_ctl_s cn73xx;
2671 	struct cvmx_lmcx_mr_mpr_ctl_s cn78xx;
2672 	struct cvmx_lmcx_mr_mpr_ctl_s cn78xxp1;
2673 	struct cvmx_lmcx_mr_mpr_ctl_s cnf75xx;
2674 };
2675 
2676 /**
2677  * cvmx_lmc#_ns_ctl
2678  *
2679  * This register contains control parameters for handling nonsecure accesses.
2680  *
2681  */
2682 union cvmx_lmcx_ns_ctl {
2683 	u64 u64;
2684 	struct cvmx_lmcx_ns_ctl_s {
2685 		uint64_t reserved_26_63:38;
2686 		uint64_t ns_scramble_dis:1;
2687 		uint64_t reserved_18_24:7;
2688 		uint64_t adr_offset:18;
2689 	} s;
2690 	struct cvmx_lmcx_ns_ctl_s cn73xx;
2691 	struct cvmx_lmcx_ns_ctl_s cn78xx;
2692 	struct cvmx_lmcx_ns_ctl_s cnf75xx;
2693 };
2694 
2695 /**
2696  * cvmx_lmc#_nxm
2697  *
2698  * Following is the decoding for mem_msb/rank:
2699  * 0x0: mem_msb = mem_adr[25].
2700  * 0x1: mem_msb = mem_adr[26].
2701  * 0x2: mem_msb = mem_adr[27].
2702  * 0x3: mem_msb = mem_adr[28].
2703  * 0x4: mem_msb = mem_adr[29].
2704  * 0x5: mem_msb = mem_adr[30].
2705  * 0x6: mem_msb = mem_adr[31].
2706  * 0x7: mem_msb = mem_adr[32].
2707  * 0x8: mem_msb = mem_adr[33].
2708  * 0x9: mem_msb = mem_adr[34].
2709  * 0xA: mem_msb = mem_adr[35].
2710  * 0xB: mem_msb = mem_adr[36].
2711  * 0xC-0xF = Reserved.
2712  *
2713  * For example, for a DIMM made of Samsung's K4B1G0846C-ZCF7 1Gb
2714  * (16M * 8 bit * 8 bank) parts, the column address width = 10; so with
2715  * 10b of col, 3b of bus, 3b of bank, row_lsb = 16.
2716  * Therefore, row = mem_adr[29:16] and mem_msb = 4.
2717  *
 * Note that addresses greater than the maximum defined space (pbank_msb)
 * are also treated as NXM accesses.
2720  */
2721 union cvmx_lmcx_nxm {
2722 	u64 u64;
2723 	struct cvmx_lmcx_nxm_s {
2724 		uint64_t reserved_40_63:24;
2725 		uint64_t mem_msb_d3_r1:4;
2726 		uint64_t mem_msb_d3_r0:4;
2727 		uint64_t mem_msb_d2_r1:4;
2728 		uint64_t mem_msb_d2_r0:4;
2729 		uint64_t mem_msb_d1_r1:4;
2730 		uint64_t mem_msb_d1_r0:4;
2731 		uint64_t mem_msb_d0_r1:4;
2732 		uint64_t mem_msb_d0_r0:4;
2733 		uint64_t cs_mask:8;
2734 	} s;
2735 	struct cvmx_lmcx_nxm_cn52xx {
2736 		uint64_t reserved_8_63:56;
2737 		uint64_t cs_mask:8;
2738 	} cn52xx;
2739 	struct cvmx_lmcx_nxm_cn52xx cn56xx;
2740 	struct cvmx_lmcx_nxm_cn52xx cn58xx;
2741 	struct cvmx_lmcx_nxm_s cn61xx;
2742 	struct cvmx_lmcx_nxm_s cn63xx;
2743 	struct cvmx_lmcx_nxm_s cn63xxp1;
2744 	struct cvmx_lmcx_nxm_s cn66xx;
2745 	struct cvmx_lmcx_nxm_s cn68xx;
2746 	struct cvmx_lmcx_nxm_s cn68xxp1;
2747 	struct cvmx_lmcx_nxm_cn70xx {
2748 		uint64_t reserved_24_63:40;
2749 		uint64_t mem_msb_d1_r1:4;
2750 		uint64_t mem_msb_d1_r0:4;
2751 		uint64_t mem_msb_d0_r1:4;
2752 		uint64_t mem_msb_d0_r0:4;
2753 		uint64_t reserved_4_7:4;
2754 		uint64_t cs_mask:4;
2755 	} cn70xx;
2756 	struct cvmx_lmcx_nxm_cn70xx cn70xxp1;
2757 	struct cvmx_lmcx_nxm_cn70xx cn73xx;
2758 	struct cvmx_lmcx_nxm_cn70xx cn78xx;
2759 	struct cvmx_lmcx_nxm_cn70xx cn78xxp1;
2760 	struct cvmx_lmcx_nxm_s cnf71xx;
2761 	struct cvmx_lmcx_nxm_cn70xx cnf75xx;
2762 };
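
/*
 * Arithmetic sketch of the MEM_MSB decode listed above (illustration only):
 * encoding N selects mem_adr[25 + N] as the most significant memory address
 * bit, and encodings 0xC-0xF are reserved. For the Samsung example above,
 * encoding 0x4 yields mem_adr[29].
 */
static inline int cvmx_lmcx_nxm_msb_bit_sketch(unsigned int encoding)
{
	if (encoding > 0xb)
		return -1;		/* reserved encoding */
	return 25 + (int)encoding;	/* mem_adr bit used as mem_msb */
}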
2763 
2764 /**
2765  * cvmx_lmc#_nxm_fadr
2766  *
 * This register captures only the first transaction with an NXM error while
2768  * an interrupt is pending, and only captures a subsequent event once the
2769  * interrupt is cleared by writing a one to LMC()_INT[NXM_ERR]. It captures
2770  * the actual L2C-LMC address provided to the LMC that caused the NXM error.
2771  * A read or write NXM error is captured only if enabled using the NXM
2772  * event enables.
2773  */
2774 union cvmx_lmcx_nxm_fadr {
2775 	u64 u64;
2776 	struct cvmx_lmcx_nxm_fadr_s {
2777 		uint64_t reserved_40_63:24;
2778 		uint64_t nxm_faddr_ext:1;
2779 		uint64_t nxm_src:1;
2780 		uint64_t nxm_type:1;
2781 		uint64_t nxm_faddr:37;
2782 	} s;
2783 	struct cvmx_lmcx_nxm_fadr_cn70xx {
2784 		uint64_t reserved_39_63:25;
2785 		uint64_t nxm_src:1;
2786 		uint64_t nxm_type:1;
2787 		uint64_t nxm_faddr:37;
2788 	} cn70xx;
2789 	struct cvmx_lmcx_nxm_fadr_cn70xx cn70xxp1;
2790 	struct cvmx_lmcx_nxm_fadr_s cn73xx;
2791 	struct cvmx_lmcx_nxm_fadr_s cn78xx;
2792 	struct cvmx_lmcx_nxm_fadr_s cn78xxp1;
2793 	struct cvmx_lmcx_nxm_fadr_s cnf75xx;
2794 };
2795 
2796 /**
2797  * cvmx_lmc#_ops_cnt
2798  *
2799  * LMC_OPS_CNT  = Performance Counters
2800  *
2801  */
2802 union cvmx_lmcx_ops_cnt {
2803 	u64 u64;
2804 	struct cvmx_lmcx_ops_cnt_s {
2805 		uint64_t opscnt:64;
2806 	} s;
2807 	struct cvmx_lmcx_ops_cnt_s cn61xx;
2808 	struct cvmx_lmcx_ops_cnt_s cn63xx;
2809 	struct cvmx_lmcx_ops_cnt_s cn63xxp1;
2810 	struct cvmx_lmcx_ops_cnt_s cn66xx;
2811 	struct cvmx_lmcx_ops_cnt_s cn68xx;
2812 	struct cvmx_lmcx_ops_cnt_s cn68xxp1;
2813 	struct cvmx_lmcx_ops_cnt_s cn70xx;
2814 	struct cvmx_lmcx_ops_cnt_s cn70xxp1;
2815 	struct cvmx_lmcx_ops_cnt_s cn73xx;
2816 	struct cvmx_lmcx_ops_cnt_s cn78xx;
2817 	struct cvmx_lmcx_ops_cnt_s cn78xxp1;
2818 	struct cvmx_lmcx_ops_cnt_s cnf71xx;
2819 	struct cvmx_lmcx_ops_cnt_s cnf75xx;
2820 };
2821 
2822 /**
2823  * cvmx_lmc#_ops_cnt_hi
2824  *
2825  * LMC_OPS_CNT_HI  = Performance Counters
2826  *
2827  */
2828 union cvmx_lmcx_ops_cnt_hi {
2829 	u64 u64;
2830 	struct cvmx_lmcx_ops_cnt_hi_s {
2831 		uint64_t reserved_32_63:32;
2832 		uint64_t opscnt_hi:32;
2833 	} s;
2834 	struct cvmx_lmcx_ops_cnt_hi_s cn30xx;
2835 	struct cvmx_lmcx_ops_cnt_hi_s cn31xx;
2836 	struct cvmx_lmcx_ops_cnt_hi_s cn38xx;
2837 	struct cvmx_lmcx_ops_cnt_hi_s cn38xxp2;
2838 	struct cvmx_lmcx_ops_cnt_hi_s cn50xx;
2839 	struct cvmx_lmcx_ops_cnt_hi_s cn52xx;
2840 	struct cvmx_lmcx_ops_cnt_hi_s cn52xxp1;
2841 	struct cvmx_lmcx_ops_cnt_hi_s cn56xx;
2842 	struct cvmx_lmcx_ops_cnt_hi_s cn56xxp1;
2843 	struct cvmx_lmcx_ops_cnt_hi_s cn58xx;
2844 	struct cvmx_lmcx_ops_cnt_hi_s cn58xxp1;
2845 };
2846 
2847 /**
2848  * cvmx_lmc#_ops_cnt_lo
2849  *
2850  * LMC_OPS_CNT_LO  = Performance Counters
2851  *
2852  */
2853 union cvmx_lmcx_ops_cnt_lo {
2854 	u64 u64;
2855 	struct cvmx_lmcx_ops_cnt_lo_s {
2856 		uint64_t reserved_32_63:32;
2857 		uint64_t opscnt_lo:32;
2858 	} s;
2859 	struct cvmx_lmcx_ops_cnt_lo_s cn30xx;
2860 	struct cvmx_lmcx_ops_cnt_lo_s cn31xx;
2861 	struct cvmx_lmcx_ops_cnt_lo_s cn38xx;
2862 	struct cvmx_lmcx_ops_cnt_lo_s cn38xxp2;
2863 	struct cvmx_lmcx_ops_cnt_lo_s cn50xx;
2864 	struct cvmx_lmcx_ops_cnt_lo_s cn52xx;
2865 	struct cvmx_lmcx_ops_cnt_lo_s cn52xxp1;
2866 	struct cvmx_lmcx_ops_cnt_lo_s cn56xx;
2867 	struct cvmx_lmcx_ops_cnt_lo_s cn56xxp1;
2868 	struct cvmx_lmcx_ops_cnt_lo_s cn58xx;
2869 	struct cvmx_lmcx_ops_cnt_lo_s cn58xxp1;
2870 };
2871 
2872 /**
2873  * cvmx_lmc#_phy_ctl
2874  *
2875  * LMC_PHY_CTL = LMC PHY Control
2876  *
2877  */
2878 union cvmx_lmcx_phy_ctl {
2879 	u64 u64;
2880 	struct cvmx_lmcx_phy_ctl_s {
2881 		uint64_t reserved_61_63:3;
2882 		uint64_t dsk_dbg_load_dis:1;
2883 		uint64_t dsk_dbg_overwrt_ena:1;
2884 		uint64_t dsk_dbg_wr_mode:1;
2885 		uint64_t data_rate_loopback:1;
2886 		uint64_t dq_shallow_loopback:1;
2887 		uint64_t dm_disable:1;
2888 		uint64_t c1_sel:2;
2889 		uint64_t c0_sel:2;
2890 		uint64_t phy_reset:1;
2891 		uint64_t dsk_dbg_rd_complete:1;
2892 		uint64_t dsk_dbg_rd_data:10;
2893 		uint64_t dsk_dbg_rd_start:1;
2894 		uint64_t dsk_dbg_clk_scaler:2;
2895 		uint64_t dsk_dbg_offset:2;
2896 		uint64_t dsk_dbg_num_bits_sel:1;
2897 		uint64_t dsk_dbg_byte_sel:4;
2898 		uint64_t dsk_dbg_bit_sel:4;
2899 		uint64_t dbi_mode_ena:1;
2900 		uint64_t ddr_error_n_ena:1;
2901 		uint64_t ref_pin_on:1;
2902 		uint64_t dac_on:1;
2903 		uint64_t int_pad_loopback_ena:1;
2904 		uint64_t int_phy_loopback_ena:1;
2905 		uint64_t phy_dsk_reset:1;
2906 		uint64_t phy_dsk_byp:1;
2907 		uint64_t phy_pwr_save_disable:1;
2908 		uint64_t ten:1;
2909 		uint64_t rx_always_on:1;
2910 		uint64_t lv_mode:1;
2911 		uint64_t ck_tune1:1;
2912 		uint64_t ck_dlyout1:4;
2913 		uint64_t ck_tune0:1;
2914 		uint64_t ck_dlyout0:4;
2915 		uint64_t loopback:1;
2916 		uint64_t loopback_pos:1;
2917 		uint64_t ts_stagger:1;
2918 	} s;
2919 	struct cvmx_lmcx_phy_ctl_cn61xx {
2920 		uint64_t reserved_15_63:49;
2921 		uint64_t rx_always_on:1;
2922 		uint64_t lv_mode:1;
2923 		uint64_t ck_tune1:1;
2924 		uint64_t ck_dlyout1:4;
2925 		uint64_t ck_tune0:1;
2926 		uint64_t ck_dlyout0:4;
2927 		uint64_t loopback:1;
2928 		uint64_t loopback_pos:1;
2929 		uint64_t ts_stagger:1;
2930 	} cn61xx;
2931 	struct cvmx_lmcx_phy_ctl_cn61xx cn63xx;
2932 	struct cvmx_lmcx_phy_ctl_cn63xxp1 {
2933 		uint64_t reserved_14_63:50;
2934 		uint64_t lv_mode:1;
2935 		uint64_t ck_tune1:1;
2936 		uint64_t ck_dlyout1:4;
2937 		uint64_t ck_tune0:1;
2938 		uint64_t ck_dlyout0:4;
2939 		uint64_t loopback:1;
2940 		uint64_t loopback_pos:1;
2941 		uint64_t ts_stagger:1;
2942 	} cn63xxp1;
2943 	struct cvmx_lmcx_phy_ctl_cn61xx cn66xx;
2944 	struct cvmx_lmcx_phy_ctl_cn61xx cn68xx;
2945 	struct cvmx_lmcx_phy_ctl_cn61xx cn68xxp1;
2946 	struct cvmx_lmcx_phy_ctl_cn70xx {
2947 		uint64_t reserved_51_63:13;
2948 		uint64_t phy_reset:1;
2949 		uint64_t dsk_dbg_rd_complete:1;
2950 		uint64_t dsk_dbg_rd_data:10;
2951 		uint64_t dsk_dbg_rd_start:1;
2952 		uint64_t dsk_dbg_clk_scaler:2;
2953 		uint64_t dsk_dbg_offset:2;
2954 		uint64_t dsk_dbg_num_bits_sel:1;
2955 		uint64_t dsk_dbg_byte_sel:4;
2956 		uint64_t dsk_dbg_bit_sel:4;
2957 		uint64_t dbi_mode_ena:1;
2958 		uint64_t ddr_error_n_ena:1;
2959 		uint64_t ref_pin_on:1;
2960 		uint64_t dac_on:1;
2961 		uint64_t int_pad_loopback_ena:1;
2962 		uint64_t int_phy_loopback_ena:1;
2963 		uint64_t phy_dsk_reset:1;
2964 		uint64_t phy_dsk_byp:1;
2965 		uint64_t phy_pwr_save_disable:1;
2966 		uint64_t ten:1;
2967 		uint64_t rx_always_on:1;
2968 		uint64_t lv_mode:1;
2969 		uint64_t ck_tune1:1;
2970 		uint64_t ck_dlyout1:4;
2971 		uint64_t ck_tune0:1;
2972 		uint64_t ck_dlyout0:4;
2973 		uint64_t loopback:1;
2974 		uint64_t loopback_pos:1;
2975 		uint64_t ts_stagger:1;
2976 	} cn70xx;
2977 	struct cvmx_lmcx_phy_ctl_cn70xx cn70xxp1;
2978 	struct cvmx_lmcx_phy_ctl_cn73xx {
2979 		uint64_t reserved_58_63:6;
2980 		uint64_t data_rate_loopback:1;
2981 		uint64_t dq_shallow_loopback:1;
2982 		uint64_t dm_disable:1;
2983 		uint64_t c1_sel:2;
2984 		uint64_t c0_sel:2;
2985 		uint64_t phy_reset:1;
2986 		uint64_t dsk_dbg_rd_complete:1;
2987 		uint64_t dsk_dbg_rd_data:10;
2988 		uint64_t dsk_dbg_rd_start:1;
2989 		uint64_t dsk_dbg_clk_scaler:2;
2990 		uint64_t dsk_dbg_offset:2;
2991 		uint64_t dsk_dbg_num_bits_sel:1;
2992 		uint64_t dsk_dbg_byte_sel:4;
2993 		uint64_t dsk_dbg_bit_sel:4;
2994 		uint64_t dbi_mode_ena:1;
2995 		uint64_t ddr_error_n_ena:1;
2996 		uint64_t ref_pin_on:1;
2997 		uint64_t dac_on:1;
2998 		uint64_t int_pad_loopback_ena:1;
2999 		uint64_t int_phy_loopback_ena:1;
3000 		uint64_t phy_dsk_reset:1;
3001 		uint64_t phy_dsk_byp:1;
3002 		uint64_t phy_pwr_save_disable:1;
3003 		uint64_t ten:1;
3004 		uint64_t rx_always_on:1;
3005 		uint64_t lv_mode:1;
3006 		uint64_t ck_tune1:1;
3007 		uint64_t ck_dlyout1:4;
3008 		uint64_t ck_tune0:1;
3009 		uint64_t ck_dlyout0:4;
3010 		uint64_t loopback:1;
3011 		uint64_t loopback_pos:1;
3012 		uint64_t ts_stagger:1;
3013 	} cn73xx;
3014 	struct cvmx_lmcx_phy_ctl_s cn78xx;
3015 	struct cvmx_lmcx_phy_ctl_s cn78xxp1;
3016 	struct cvmx_lmcx_phy_ctl_cn61xx cnf71xx;
3017 	struct cvmx_lmcx_phy_ctl_s cnf75xx;
3018 };
3019 
3020 /**
3021  * cvmx_lmc#_phy_ctl2
3022  */
3023 union cvmx_lmcx_phy_ctl2 {
3024 	u64 u64;
3025 	struct cvmx_lmcx_phy_ctl2_s {
3026 		uint64_t reserved_27_63:37;
3027 		uint64_t dqs8_dsk_adj:3;
3028 		uint64_t dqs7_dsk_adj:3;
3029 		uint64_t dqs6_dsk_adj:3;
3030 		uint64_t dqs5_dsk_adj:3;
3031 		uint64_t dqs4_dsk_adj:3;
3032 		uint64_t dqs3_dsk_adj:3;
3033 		uint64_t dqs2_dsk_adj:3;
3034 		uint64_t dqs1_dsk_adj:3;
3035 		uint64_t dqs0_dsk_adj:3;
3036 	} s;
3037 	struct cvmx_lmcx_phy_ctl2_s cn78xx;
3038 	struct cvmx_lmcx_phy_ctl2_s cnf75xx;
3039 };
3040 
3041 /**
3042  * cvmx_lmc#_pll_bwctl
3043  *
3044  * LMC_PLL_BWCTL  = DDR PLL Bandwidth Control Register
3045  *
3046  */
3047 union cvmx_lmcx_pll_bwctl {
3048 	u64 u64;
3049 	struct cvmx_lmcx_pll_bwctl_s {
3050 		uint64_t reserved_5_63:59;
3051 		uint64_t bwupd:1;
3052 		uint64_t bwctl:4;
3053 	} s;
3054 	struct cvmx_lmcx_pll_bwctl_s cn30xx;
3055 	struct cvmx_lmcx_pll_bwctl_s cn31xx;
3056 	struct cvmx_lmcx_pll_bwctl_s cn38xx;
3057 	struct cvmx_lmcx_pll_bwctl_s cn38xxp2;
3058 };
3059 
3060 /**
3061  * cvmx_lmc#_pll_ctl
3062  *
3063  * LMC_PLL_CTL = LMC pll control
3064  *
3065  *
3066  * Notes:
3067  * This CSR is only relevant for LMC0. LMC1_PLL_CTL is not used.
3068  *
3069  * Exactly one of EN2, EN4, EN6, EN8, EN12, EN16 must be set.
3070  *
3071  * The resultant DDR_CK frequency is the DDR2_REF_CLK
3072  * frequency multiplied by:
3073  *
3074  *     (CLKF + 1) / ((CLKR + 1) * EN(2,4,6,8,12,16))
3075  *
3076  * The PLL frequency, which is:
3077  *
3078  *     (DDR2_REF_CLK freq) * ((CLKF + 1) / (CLKR + 1))
3079  *
3080  * must reside between 1.2 and 2.5 GHz. A faster PLL frequency is
3081  * desirable if there is a choice.
3082  */
3083 union cvmx_lmcx_pll_ctl {
3084 	u64 u64;
3085 	struct cvmx_lmcx_pll_ctl_s {
3086 		uint64_t reserved_30_63:34;
3087 		uint64_t bypass:1;
3088 		uint64_t fasten_n:1;
3089 		uint64_t div_reset:1;
3090 		uint64_t reset_n:1;
3091 		uint64_t clkf:12;
3092 		uint64_t clkr:6;
3093 		uint64_t reserved_6_7:2;
3094 		uint64_t en16:1;
3095 		uint64_t en12:1;
3096 		uint64_t en8:1;
3097 		uint64_t en6:1;
3098 		uint64_t en4:1;
3099 		uint64_t en2:1;
3100 	} s;
3101 	struct cvmx_lmcx_pll_ctl_cn50xx {
3102 		uint64_t reserved_29_63:35;
3103 		uint64_t fasten_n:1;
3104 		uint64_t div_reset:1;
3105 		uint64_t reset_n:1;
3106 		uint64_t clkf:12;
3107 		uint64_t clkr:6;
3108 		uint64_t reserved_6_7:2;
3109 		uint64_t en16:1;
3110 		uint64_t en12:1;
3111 		uint64_t en8:1;
3112 		uint64_t en6:1;
3113 		uint64_t en4:1;
3114 		uint64_t en2:1;
3115 	} cn50xx;
3116 	struct cvmx_lmcx_pll_ctl_s cn52xx;
3117 	struct cvmx_lmcx_pll_ctl_s cn52xxp1;
3118 	struct cvmx_lmcx_pll_ctl_cn50xx cn56xx;
3119 	struct cvmx_lmcx_pll_ctl_cn56xxp1 {
3120 		uint64_t reserved_28_63:36;
3121 		uint64_t div_reset:1;
3122 		uint64_t reset_n:1;
3123 		uint64_t clkf:12;
3124 		uint64_t clkr:6;
3125 		uint64_t reserved_6_7:2;
3126 		uint64_t en16:1;
3127 		uint64_t en12:1;
3128 		uint64_t en8:1;
3129 		uint64_t en6:1;
3130 		uint64_t en4:1;
3131 		uint64_t en2:1;
3132 	} cn56xxp1;
3133 	struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xx;
3134 	struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xxp1;
3135 };
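
/*
 * Sketch of the frequency relation in the notes above (an illustration,
 * not a driver routine): DDR_CK = ref * (CLKF + 1) / ((CLKR + 1) * EN),
 * where en_div is the single enabled divider (2, 4, 6, 8, 12 or 16) and
 * ref_hz is whatever DDR2_REF_CLK rate the board provides. Overflow
 * handling is omitted for brevity.
 */
static inline u64 cvmx_lmcx_pll_ddr_ck_hz_sketch(u64 ref_hz, unsigned int clkf,
						 unsigned int clkr, unsigned int en_div)
{
	return (ref_hz * (clkf + 1)) / ((u64)(clkr + 1) * en_div);
}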
3136 
3137 /**
3138  * cvmx_lmc#_pll_status
3139  *
3140  * LMC_PLL_STATUS = LMC pll status
3141  *
3142  */
3143 union cvmx_lmcx_pll_status {
3144 	u64 u64;
3145 	struct cvmx_lmcx_pll_status_s {
3146 		uint64_t reserved_32_63:32;
3147 		uint64_t ddr__nctl:5;
3148 		uint64_t ddr__pctl:5;
3149 		uint64_t reserved_2_21:20;
3150 		uint64_t rfslip:1;
3151 		uint64_t fbslip:1;
3152 	} s;
3153 	struct cvmx_lmcx_pll_status_s cn50xx;
3154 	struct cvmx_lmcx_pll_status_s cn52xx;
3155 	struct cvmx_lmcx_pll_status_s cn52xxp1;
3156 	struct cvmx_lmcx_pll_status_s cn56xx;
3157 	struct cvmx_lmcx_pll_status_s cn56xxp1;
3158 	struct cvmx_lmcx_pll_status_s cn58xx;
3159 	struct cvmx_lmcx_pll_status_cn58xxp1 {
3160 		uint64_t reserved_2_63:62;
3161 		uint64_t rfslip:1;
3162 		uint64_t fbslip:1;
3163 	} cn58xxp1;
3164 };
3165 
3166 /**
3167  * cvmx_lmc#_ppr_ctl
3168  *
3169  * This register contains programmable timing and control parameters used
3170  * when running the post package repair sequence. The timing fields
3171  * PPR_CTL[TPGMPST], PPR_CTL[TPGM_EXIT] and PPR_CTL[TPGM] need to be set as
 * PPR_CTL[TPGMPST], PPR_CTL[TPGM_EXIT] and PPR_CTL[TPGM] must be set so as
 * to satisfy the minimum values given in the JEDEC DDR4 spec before
3174  * the PPR sequence.
3175  *
3176  * Running hard PPR may require LMC to issue security key as four consecutive
 * Running hard PPR may require the LMC to issue the security key as four
 * consecutive MR0 commands, each with a unique address field A[17:0]. Set
 * the security key in the general purpose CSRs as follows:
3180  * _ Security key 0 = LMC()_GENERAL_PURPOSE0[DATA]<17:0>.
3181  * _ Security key 1 = LMC()_GENERAL_PURPOSE0[DATA]<35:18>.
3182  * _ Security key 2 = LMC()_GENERAL_PURPOSE1[DATA]<17:0>.
3183  * _ Security key 3 = LMC()_GENERAL_PURPOSE1[DATA]<35:18>.
3184  */
3185 union cvmx_lmcx_ppr_ctl {
3186 	u64 u64;
3187 	struct cvmx_lmcx_ppr_ctl_s {
3188 		uint64_t reserved_27_63:37;
3189 		uint64_t lrank_sel:3;
3190 		uint64_t skip_issue_security:1;
3191 		uint64_t sppr:1;
3192 		uint64_t tpgm:10;
3193 		uint64_t tpgm_exit:5;
3194 		uint64_t tpgmpst:7;
3195 	} s;
3196 	struct cvmx_lmcx_ppr_ctl_cn73xx {
3197 		uint64_t reserved_24_63:40;
3198 		uint64_t skip_issue_security:1;
3199 		uint64_t sppr:1;
3200 		uint64_t tpgm:10;
3201 		uint64_t tpgm_exit:5;
3202 		uint64_t tpgmpst:7;
3203 	} cn73xx;
3204 	struct cvmx_lmcx_ppr_ctl_s cn78xx;
3205 	struct cvmx_lmcx_ppr_ctl_cn73xx cnf75xx;
3206 };
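
/*
 * Bit-extraction sketch of the hard-PPR security key packing described
 * above (illustration only). The caller passes the raw
 * LMC()_GENERAL_PURPOSE0/1 values, read by whatever means the platform
 * provides; each key is an 18-bit MR0 address field.
 */
static inline void cvmx_lmcx_ppr_keys_sketch(u64 gp0, u64 gp1, u64 key[4])
{
	key[0] = gp0 & 0x3ffff;		/* GENERAL_PURPOSE0[DATA]<17:0>  */
	key[1] = (gp0 >> 18) & 0x3ffff;	/* GENERAL_PURPOSE0[DATA]<35:18> */
	key[2] = gp1 & 0x3ffff;		/* GENERAL_PURPOSE1[DATA]<17:0>  */
	key[3] = (gp1 >> 18) & 0x3ffff;	/* GENERAL_PURPOSE1[DATA]<35:18> */
}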
3207 
3208 /**
3209  * cvmx_lmc#_read_level_ctl
3210  *
3211  * Notes:
3212  * The HW writes and reads the cache block selected by ROW, COL, BNK and
3213  * the rank as part of a read-leveling sequence for a rank.
3214  * A cache block write is 16 72-bit words. PATTERN selects the write value.
3215  * For the first 8 words, the write value is the bit PATTERN<i> duplicated
3216  * into a 72-bit vector. The write value of the last 8 words is the inverse
3217  * of the write value of the first 8 words. See LMC*_READ_LEVEL_RANK*.
3218  */
3219 union cvmx_lmcx_read_level_ctl {
3220 	u64 u64;
3221 	struct cvmx_lmcx_read_level_ctl_s {
3222 		uint64_t reserved_44_63:20;
3223 		uint64_t rankmask:4;
3224 		uint64_t pattern:8;
3225 		uint64_t row:16;
3226 		uint64_t col:12;
3227 		uint64_t reserved_3_3:1;
3228 		uint64_t bnk:3;
3229 	} s;
3230 	struct cvmx_lmcx_read_level_ctl_s cn52xx;
3231 	struct cvmx_lmcx_read_level_ctl_s cn52xxp1;
3232 	struct cvmx_lmcx_read_level_ctl_s cn56xx;
3233 	struct cvmx_lmcx_read_level_ctl_s cn56xxp1;
3234 };
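
/*
 * Sketch of the cache-block fill rule from the notes above (illustration
 * only): for the first 8 words, word i is PATTERN<i> replicated across all
 * 72 bits, and the last 8 words are the inverse of the first 8. Returning
 * just the fill bit per word keeps this a pure statement of the rule; the
 * hardware, not software, writes the actual 72-bit words.
 */
static inline unsigned int cvmx_lmcx_read_level_fill_bit_sketch(unsigned int pattern,
								unsigned int word)
{
	unsigned int bit = (pattern >> (word & 7)) & 1;

	return (word < 8) ? bit : bit ^ 1;	/* upper 8 words invert the lower 8 */
}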
3235 
3236 /**
3237  * cvmx_lmc#_read_level_dbg
3238  *
3239  * Notes:
3240  * A given read of LMC*_READ_LEVEL_DBG returns the read-leveling pass/fail
3241  * results for all possible delay settings (i.e. the BITMASK) for only one
3242  * byte in the last rank that the HW read-leveled.
3243  * LMC*_READ_LEVEL_DBG[BYTE] selects the particular byte.
 * To get these pass/fail results for a different rank, you must run
3245  * the hardware read-leveling again. For example, it is possible to get the
3246  * BITMASK results for every byte of every rank if you run read-leveling
3247  * separately for each rank, probing LMC*_READ_LEVEL_DBG between each
3248  * read-leveling.
3249  */
3250 union cvmx_lmcx_read_level_dbg {
3251 	u64 u64;
3252 	struct cvmx_lmcx_read_level_dbg_s {
3253 		uint64_t reserved_32_63:32;
3254 		uint64_t bitmask:16;
3255 		uint64_t reserved_4_15:12;
3256 		uint64_t byte:4;
3257 	} s;
3258 	struct cvmx_lmcx_read_level_dbg_s cn52xx;
3259 	struct cvmx_lmcx_read_level_dbg_s cn52xxp1;
3260 	struct cvmx_lmcx_read_level_dbg_s cn56xx;
3261 	struct cvmx_lmcx_read_level_dbg_s cn56xxp1;
3262 };
3263 
3264 /**
3265  * cvmx_lmc#_read_level_rank#
3266  *
3267  * Notes:
 * There are four of these CSRs per LMC, one per rank.
3269  * Each CSR is written by HW during a read-leveling sequence for the rank.
3270  * (HW sets STATUS==3 after HW read-leveling completes for the rank.)
3271  * Each CSR may also be written by SW, but not while a read-leveling sequence
3272  * is in progress. (HW sets STATUS==1 after a CSR write.)
3273  * Deskew setting is measured in units of 1/4 DCLK, so the above BYTE*
3274  * values can range over 4 DCLKs.
3275  * SW initiates a HW read-leveling sequence by programming
3276  * LMC*_READ_LEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
3277  * See LMC*_READ_LEVEL_CTL.
3278  */
3279 union cvmx_lmcx_read_level_rankx {
3280 	u64 u64;
3281 	struct cvmx_lmcx_read_level_rankx_s {
3282 		uint64_t reserved_38_63:26;
3283 		uint64_t status:2;
3284 		uint64_t byte8:4;
3285 		uint64_t byte7:4;
3286 		uint64_t byte6:4;
3287 		uint64_t byte5:4;
3288 		uint64_t byte4:4;
3289 		uint64_t byte3:4;
3290 		uint64_t byte2:4;
3291 		uint64_t byte1:4;
3292 		uint64_t byte0:4;
3293 	} s;
3294 	struct cvmx_lmcx_read_level_rankx_s cn52xx;
3295 	struct cvmx_lmcx_read_level_rankx_s cn52xxp1;
3296 	struct cvmx_lmcx_read_level_rankx_s cn56xx;
3297 	struct cvmx_lmcx_read_level_rankx_s cn56xxp1;
3298 };
3299 
3300 /**
3301  * cvmx_lmc#_ref_status
3302  *
3303  * This register contains the status of the refresh pending counter.
3304  *
3305  */
3306 union cvmx_lmcx_ref_status {
3307 	u64 u64;
3308 	struct cvmx_lmcx_ref_status_s {
3309 		uint64_t reserved_4_63:60;
3310 		uint64_t ref_pend_max_clr:1;
3311 		uint64_t ref_count:3;
3312 	} s;
3313 	struct cvmx_lmcx_ref_status_s cn73xx;
3314 	struct cvmx_lmcx_ref_status_s cn78xx;
3315 	struct cvmx_lmcx_ref_status_s cnf75xx;
3316 };
3317 
3318 /**
3319  * cvmx_lmc#_reset_ctl
3320  *
3321  * Specify the RSL base addresses for the block.
3322  *
3323  */
3324 union cvmx_lmcx_reset_ctl {
3325 	u64 u64;
3326 	struct cvmx_lmcx_reset_ctl_s {
3327 		uint64_t reserved_4_63:60;
3328 		uint64_t ddr3psv:1;
3329 		uint64_t ddr3psoft:1;
3330 		uint64_t ddr3pwarm:1;
3331 		uint64_t ddr3rst:1;
3332 	} s;
3333 	struct cvmx_lmcx_reset_ctl_s cn61xx;
3334 	struct cvmx_lmcx_reset_ctl_s cn63xx;
3335 	struct cvmx_lmcx_reset_ctl_s cn63xxp1;
3336 	struct cvmx_lmcx_reset_ctl_s cn66xx;
3337 	struct cvmx_lmcx_reset_ctl_s cn68xx;
3338 	struct cvmx_lmcx_reset_ctl_s cn68xxp1;
3339 	struct cvmx_lmcx_reset_ctl_s cn70xx;
3340 	struct cvmx_lmcx_reset_ctl_s cn70xxp1;
3341 	struct cvmx_lmcx_reset_ctl_s cn73xx;
3342 	struct cvmx_lmcx_reset_ctl_s cn78xx;
3343 	struct cvmx_lmcx_reset_ctl_s cn78xxp1;
3344 	struct cvmx_lmcx_reset_ctl_s cnf71xx;
3345 	struct cvmx_lmcx_reset_ctl_s cnf75xx;
3346 };
3347 
3348 /**
3349  * cvmx_lmc#_retry_config
3350  *
3351  * This register configures automatic retry operation.
3352  *
3353  */
3354 union cvmx_lmcx_retry_config {
3355 	u64 u64;
3356 	struct cvmx_lmcx_retry_config_s {
3357 		uint64_t reserved_56_63:8;
3358 		uint64_t max_errors:24;
3359 		uint64_t reserved_13_31:19;
3360 		uint64_t error_continue:1;
3361 		uint64_t reserved_9_11:3;
3362 		uint64_t auto_error_continue:1;
3363 		uint64_t reserved_5_7:3;
3364 		uint64_t pulse_count_auto_clr:1;
3365 		uint64_t reserved_1_3:3;
3366 		uint64_t retry_enable:1;
3367 	} s;
3368 	struct cvmx_lmcx_retry_config_s cn73xx;
3369 	struct cvmx_lmcx_retry_config_s cn78xx;
3370 	struct cvmx_lmcx_retry_config_s cnf75xx;
3371 };
3372 
3373 /**
3374  * cvmx_lmc#_retry_status
3375  *
3376  * This register provides status on automatic retry operation.
3377  *
3378  */
3379 union cvmx_lmcx_retry_status {
3380 	u64 u64;
3381 	struct cvmx_lmcx_retry_status_s {
3382 		uint64_t clear_error_count:1;
3383 		uint64_t clear_error_pulse_count:1;
3384 		uint64_t reserved_57_61:5;
3385 		uint64_t error_pulse_count_valid:1;
3386 		uint64_t error_pulse_count_sat:1;
3387 		uint64_t reserved_52_54:3;
3388 		uint64_t error_pulse_count:4;
3389 		uint64_t reserved_45_47:3;
3390 		uint64_t error_sequence:5;
3391 		uint64_t reserved_33_39:7;
3392 		uint64_t error_type:1;
3393 		uint64_t reserved_24_31:8;
3394 		uint64_t error_count:24;
3395 	} s;
3396 	struct cvmx_lmcx_retry_status_s cn73xx;
3397 	struct cvmx_lmcx_retry_status_s cn78xx;
3398 	struct cvmx_lmcx_retry_status_s cnf75xx;
3399 };
3400 
3401 /**
3402  * cvmx_lmc#_rlevel_ctl
3403  */
3404 union cvmx_lmcx_rlevel_ctl {
3405 	u64 u64;
3406 	struct cvmx_lmcx_rlevel_ctl_s {
3407 		uint64_t reserved_33_63:31;
3408 		uint64_t tccd_sel:1;
3409 		uint64_t pattern:8;
3410 		uint64_t reserved_22_23:2;
3411 		uint64_t delay_unload_3:1;
3412 		uint64_t delay_unload_2:1;
3413 		uint64_t delay_unload_1:1;
3414 		uint64_t delay_unload_0:1;
3415 		uint64_t bitmask:8;
3416 		uint64_t or_dis:1;
3417 		uint64_t offset_en:1;
3418 		uint64_t offset:4;
3419 		uint64_t byte:4;
3420 	} s;
3421 	struct cvmx_lmcx_rlevel_ctl_cn61xx {
3422 		uint64_t reserved_22_63:42;
3423 		uint64_t delay_unload_3:1;
3424 		uint64_t delay_unload_2:1;
3425 		uint64_t delay_unload_1:1;
3426 		uint64_t delay_unload_0:1;
3427 		uint64_t bitmask:8;
3428 		uint64_t or_dis:1;
3429 		uint64_t offset_en:1;
3430 		uint64_t offset:4;
3431 		uint64_t byte:4;
3432 	} cn61xx;
3433 	struct cvmx_lmcx_rlevel_ctl_cn61xx cn63xx;
3434 	struct cvmx_lmcx_rlevel_ctl_cn63xxp1 {
3435 		uint64_t reserved_9_63:55;
3436 		uint64_t offset_en:1;
3437 		uint64_t offset:4;
3438 		uint64_t byte:4;
3439 	} cn63xxp1;
3440 	struct cvmx_lmcx_rlevel_ctl_cn61xx cn66xx;
3441 	struct cvmx_lmcx_rlevel_ctl_cn61xx cn68xx;
3442 	struct cvmx_lmcx_rlevel_ctl_cn61xx cn68xxp1;
3443 	struct cvmx_lmcx_rlevel_ctl_cn70xx {
3444 		uint64_t reserved_32_63:32;
3445 		uint64_t pattern:8;
3446 		uint64_t reserved_22_23:2;
3447 		uint64_t delay_unload_3:1;
3448 		uint64_t delay_unload_2:1;
3449 		uint64_t delay_unload_1:1;
3450 		uint64_t delay_unload_0:1;
3451 		uint64_t bitmask:8;
3452 		uint64_t or_dis:1;
3453 		uint64_t offset_en:1;
3454 		uint64_t offset:4;
3455 		uint64_t byte:4;
3456 	} cn70xx;
3457 	struct cvmx_lmcx_rlevel_ctl_cn70xx cn70xxp1;
3458 	struct cvmx_lmcx_rlevel_ctl_cn70xx cn73xx;
3459 	struct cvmx_lmcx_rlevel_ctl_s cn78xx;
3460 	struct cvmx_lmcx_rlevel_ctl_s cn78xxp1;
3461 	struct cvmx_lmcx_rlevel_ctl_cn61xx cnf71xx;
3462 	struct cvmx_lmcx_rlevel_ctl_s cnf75xx;
3463 };
3464 
3465 /**
3466  * cvmx_lmc#_rlevel_dbg
3467  *
3468  * A given read of LMC()_RLEVEL_DBG returns the read leveling pass/fail
3469  * results for all possible delay settings (i.e. the BITMASK) for only
3470  * one byte in the last rank that the hardware ran read leveling on.
3471  * LMC()_RLEVEL_CTL[BYTE] selects the particular byte. To get these
3472  * pass/fail results for a different rank, you must run the hardware
3473  * read leveling again. For example, it is possible to get the [BITMASK]
3474  * results for every byte of every rank if you run read leveling separately
 * for each rank, probing LMC()_RLEVEL_DBG between each read leveling.
3476  */
3477 union cvmx_lmcx_rlevel_dbg {
3478 	u64 u64;
3479 	struct cvmx_lmcx_rlevel_dbg_s {
3480 		uint64_t bitmask:64;
3481 	} s;
3482 	struct cvmx_lmcx_rlevel_dbg_s cn61xx;
3483 	struct cvmx_lmcx_rlevel_dbg_s cn63xx;
3484 	struct cvmx_lmcx_rlevel_dbg_s cn63xxp1;
3485 	struct cvmx_lmcx_rlevel_dbg_s cn66xx;
3486 	struct cvmx_lmcx_rlevel_dbg_s cn68xx;
3487 	struct cvmx_lmcx_rlevel_dbg_s cn68xxp1;
3488 	struct cvmx_lmcx_rlevel_dbg_s cn70xx;
3489 	struct cvmx_lmcx_rlevel_dbg_s cn70xxp1;
3490 	struct cvmx_lmcx_rlevel_dbg_s cn73xx;
3491 	struct cvmx_lmcx_rlevel_dbg_s cn78xx;
3492 	struct cvmx_lmcx_rlevel_dbg_s cn78xxp1;
3493 	struct cvmx_lmcx_rlevel_dbg_s cnf71xx;
3494 	struct cvmx_lmcx_rlevel_dbg_s cnf75xx;
3495 };
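
/*
 * Probe-loop sketch for the description above (illustration only): for the
 * last rank that hardware read leveled, select each byte lane in turn via
 * LMC()_RLEVEL_CTL[BYTE] and record the pass/fail bitmask reported in
 * LMC()_RLEVEL_DBG. The rd64/wr64 accessors and the two register addresses
 * are assumptions supplied by the caller.
 */
static inline void cvmx_lmcx_rlevel_dbg_dump_sketch(u64 (*rd64)(u64 addr),
						    void (*wr64)(u64 addr, u64 val),
						    u64 rlevel_ctl_addr,
						    u64 rlevel_dbg_addr,
						    u64 bitmask_out[9])
{
	union cvmx_lmcx_rlevel_ctl ctl;
	union cvmx_lmcx_rlevel_dbg dbg;
	unsigned int byte;

	for (byte = 0; byte < 9; byte++) {
		ctl.u64 = rd64(rlevel_ctl_addr);
		ctl.s.byte = byte;			/* select byte lane to report */
		wr64(rlevel_ctl_addr, ctl.u64);
		dbg.u64 = rd64(rlevel_dbg_addr);
		bitmask_out[byte] = dbg.s.bitmask;	/* pass/fail mask for this lane */
	}
}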
3496 
3497 /**
3498  * cvmx_lmc#_rlevel_rank#
3499  *
3500  * Four of these CSRs exist per LMC, one for each rank. Read level setting
3501  * is measured in units of 1/4 CK, so the BYTEn values can range over 16 CK
3502  * cycles. Each CSR is written by hardware during a read leveling sequence
3503  * for the rank. (Hardware sets [STATUS] to 3 after hardware read leveling
3504  * completes for the rank.)
3505  *
3506  * If hardware is unable to find a match per LMC()_RLEVEL_CTL[OFFSET_EN] and
3507  * LMC()_RLEVEL_CTL[OFFSET], then hardware sets
3508  * LMC()_RLEVEL_RANK()[BYTEn<5:0>] to 0x0.
3509  *
3510  * Each CSR may also be written by software, but not while a read leveling
3511  * sequence is in progress. (Hardware sets [STATUS] to 1 after a CSR write.)
 * Software initiates a hardware read-leveling sequence by programming
 * LMC()_RLEVEL_CTL and writing LMC()_SEQ_CTL[INIT_START] = 1 with
 * [SEQ_SEL] = 1.
3514  * See LMC()_RLEVEL_CTL.
3515  *
3516  * LMC()_RLEVEL_RANKi values for ranks i without attached DRAM should be set
3517  * such that they do not increase the range of possible BYTE values for any
3518  * byte lane. The easiest way to do this is to set LMC()_RLEVEL_RANKi =
3519  * LMC()_RLEVEL_RANKj, where j is some rank with attached DRAM whose
3520  * LMC()_RLEVEL_RANKj is already fully initialized.
3521  */
3522 union cvmx_lmcx_rlevel_rankx {
3523 	u64 u64;
3524 	struct cvmx_lmcx_rlevel_rankx_s {
3525 		uint64_t reserved_56_63:8;
3526 		uint64_t status:2;
3527 		uint64_t byte8:6;
3528 		uint64_t byte7:6;
3529 		uint64_t byte6:6;
3530 		uint64_t byte5:6;
3531 		uint64_t byte4:6;
3532 		uint64_t byte3:6;
3533 		uint64_t byte2:6;
3534 		uint64_t byte1:6;
3535 		uint64_t byte0:6;
3536 	} s;
3537 	struct cvmx_lmcx_rlevel_rankx_s cn61xx;
3538 	struct cvmx_lmcx_rlevel_rankx_s cn63xx;
3539 	struct cvmx_lmcx_rlevel_rankx_s cn63xxp1;
3540 	struct cvmx_lmcx_rlevel_rankx_s cn66xx;
3541 	struct cvmx_lmcx_rlevel_rankx_s cn68xx;
3542 	struct cvmx_lmcx_rlevel_rankx_s cn68xxp1;
3543 	struct cvmx_lmcx_rlevel_rankx_s cn70xx;
3544 	struct cvmx_lmcx_rlevel_rankx_s cn70xxp1;
3545 	struct cvmx_lmcx_rlevel_rankx_s cn73xx;
3546 	struct cvmx_lmcx_rlevel_rankx_s cn78xx;
3547 	struct cvmx_lmcx_rlevel_rankx_s cn78xxp1;
3548 	struct cvmx_lmcx_rlevel_rankx_s cnf71xx;
3549 	struct cvmx_lmcx_rlevel_rankx_s cnf75xx;
3550 };
3551 
3552 /**
3553  * cvmx_lmc#_rodt_comp_ctl
3554  *
3555  * LMC_RODT_COMP_CTL = LMC Compensation control
3556  *
3557  */
3558 union cvmx_lmcx_rodt_comp_ctl {
3559 	u64 u64;
3560 	struct cvmx_lmcx_rodt_comp_ctl_s {
3561 		uint64_t reserved_17_63:47;
3562 		uint64_t enable:1;
3563 		uint64_t reserved_12_15:4;
3564 		uint64_t nctl:4;
3565 		uint64_t reserved_5_7:3;
3566 		uint64_t pctl:5;
3567 	} s;
3568 	struct cvmx_lmcx_rodt_comp_ctl_s cn50xx;
3569 	struct cvmx_lmcx_rodt_comp_ctl_s cn52xx;
3570 	struct cvmx_lmcx_rodt_comp_ctl_s cn52xxp1;
3571 	struct cvmx_lmcx_rodt_comp_ctl_s cn56xx;
3572 	struct cvmx_lmcx_rodt_comp_ctl_s cn56xxp1;
3573 	struct cvmx_lmcx_rodt_comp_ctl_s cn58xx;
3574 	struct cvmx_lmcx_rodt_comp_ctl_s cn58xxp1;
3575 };
3576 
3577 /**
3578  * cvmx_lmc#_rodt_ctl
3579  *
3580  * LMC_RODT_CTL = Obsolete LMC Read OnDieTermination control
 * See the description in LMC_WODT_CTL1. On reads, Octeon only supports
 * turning on ODTs in the lower two DIMMs, using the masks below.
3583  *
3584  * Notes:
3585  * When a given RANK in position N is selected, the RODT _HI and _LO masks
3586  * for that position are used.
3587  * Mask[3:0] is used for RODT control of the RANKs in positions 3, 2, 1,
3588  * and 0, respectively.
3589  * In  64b mode, DIMMs are assumed to be ordered in the following order:
3590  *  position 3: [unused        , DIMM1_RANK1_LO]
3591  *  position 2: [unused        , DIMM1_RANK0_LO]
3592  *  position 1: [unused        , DIMM0_RANK1_LO]
3593  *  position 0: [unused        , DIMM0_RANK0_LO]
3594  * In 128b mode, DIMMs are assumed to be ordered in the following order:
3595  *  position 3: [DIMM3_RANK1_HI, DIMM1_RANK1_LO]
3596  *  position 2: [DIMM3_RANK0_HI, DIMM1_RANK0_LO]
3597  *  position 1: [DIMM2_RANK1_HI, DIMM0_RANK1_LO]
3598  *  position 0: [DIMM2_RANK0_HI, DIMM0_RANK0_LO]
3599  */
3600 union cvmx_lmcx_rodt_ctl {
3601 	u64 u64;
3602 	struct cvmx_lmcx_rodt_ctl_s {
3603 		uint64_t reserved_32_63:32;
3604 		uint64_t rodt_hi3:4;
3605 		uint64_t rodt_hi2:4;
3606 		uint64_t rodt_hi1:4;
3607 		uint64_t rodt_hi0:4;
3608 		uint64_t rodt_lo3:4;
3609 		uint64_t rodt_lo2:4;
3610 		uint64_t rodt_lo1:4;
3611 		uint64_t rodt_lo0:4;
3612 	} s;
3613 	struct cvmx_lmcx_rodt_ctl_s cn30xx;
3614 	struct cvmx_lmcx_rodt_ctl_s cn31xx;
3615 	struct cvmx_lmcx_rodt_ctl_s cn38xx;
3616 	struct cvmx_lmcx_rodt_ctl_s cn38xxp2;
3617 	struct cvmx_lmcx_rodt_ctl_s cn50xx;
3618 	struct cvmx_lmcx_rodt_ctl_s cn52xx;
3619 	struct cvmx_lmcx_rodt_ctl_s cn52xxp1;
3620 	struct cvmx_lmcx_rodt_ctl_s cn56xx;
3621 	struct cvmx_lmcx_rodt_ctl_s cn56xxp1;
3622 	struct cvmx_lmcx_rodt_ctl_s cn58xx;
3623 	struct cvmx_lmcx_rodt_ctl_s cn58xxp1;
3624 };
3625 
3626 /**
3627  * cvmx_lmc#_rodt_mask
3628  *
3629  * System designers may desire to terminate DQ/DQS lines for higher frequency
3630  * DDR operations, especially on a multirank system. DDR3 DQ/DQS I/Os have
3631  * built-in termination resistors that can be turned on or off by the
3632  * controller, after meeting TAOND and TAOF timing requirements.
3633  *
3634  * Each rank has its own ODT pin that fans out to all the memory parts in
3635  * that DIMM. System designers may prefer different combinations of ODT ONs
3636  * for read operations into different ranks. CNXXXX supports full
3637  * programmability by way of the mask register below. Each rank position has
3638  * its own 4-bit programmable field. When the controller does a read to that
3639  * rank, it sets the 4 ODT pins to the MASK pins below. For example, when
3640  * doing a read from Rank0, a system designer may desire to terminate the
3641  * lines with the resistor on DIMM0/Rank1. The mask [RODT_D0_R0] would then
3642  * be [0010].
3643  *
3644  * CNXXXX drives the appropriate mask values on the ODT pins by default.
3645  * If this feature is not required, write 0x0 in this register. Note that,
3646  * as per the JEDEC DDR3 specifications, the ODT pin for the rank that is
3647  * being read should always be 0x0. When a given RANK is selected, the RODT
3648  * mask for that rank is used. The resulting RODT mask is driven to the
3649  * DIMMs in the following manner:
3650  */
3651 union cvmx_lmcx_rodt_mask {
3652 	u64 u64;
3653 	struct cvmx_lmcx_rodt_mask_s {
3654 		uint64_t rodt_d3_r1:8;
3655 		uint64_t rodt_d3_r0:8;
3656 		uint64_t rodt_d2_r1:8;
3657 		uint64_t rodt_d2_r0:8;
3658 		uint64_t rodt_d1_r1:8;
3659 		uint64_t rodt_d1_r0:8;
3660 		uint64_t rodt_d0_r1:8;
3661 		uint64_t rodt_d0_r0:8;
3662 	} s;
3663 	struct cvmx_lmcx_rodt_mask_s cn61xx;
3664 	struct cvmx_lmcx_rodt_mask_s cn63xx;
3665 	struct cvmx_lmcx_rodt_mask_s cn63xxp1;
3666 	struct cvmx_lmcx_rodt_mask_s cn66xx;
3667 	struct cvmx_lmcx_rodt_mask_s cn68xx;
3668 	struct cvmx_lmcx_rodt_mask_s cn68xxp1;
3669 	struct cvmx_lmcx_rodt_mask_cn70xx {
3670 		uint64_t reserved_28_63:36;
3671 		uint64_t rodt_d1_r1:4;
3672 		uint64_t reserved_20_23:4;
3673 		uint64_t rodt_d1_r0:4;
3674 		uint64_t reserved_12_15:4;
3675 		uint64_t rodt_d0_r1:4;
3676 		uint64_t reserved_4_7:4;
3677 		uint64_t rodt_d0_r0:4;
3678 	} cn70xx;
3679 	struct cvmx_lmcx_rodt_mask_cn70xx cn70xxp1;
3680 	struct cvmx_lmcx_rodt_mask_cn70xx cn73xx;
3681 	struct cvmx_lmcx_rodt_mask_cn70xx cn78xx;
3682 	struct cvmx_lmcx_rodt_mask_cn70xx cn78xxp1;
3683 	struct cvmx_lmcx_rodt_mask_s cnf71xx;
3684 	struct cvmx_lmcx_rodt_mask_cn70xx cnf75xx;
3685 };
3686 
3687 /**
3688  * cvmx_lmc#_scramble_cfg0
3689  *
3690  * LMC_SCRAMBLE_CFG0 = LMC Scramble Config0
3691  *
3692  */
3693 union cvmx_lmcx_scramble_cfg0 {
3694 	u64 u64;
3695 	struct cvmx_lmcx_scramble_cfg0_s {
3696 		uint64_t key:64;
3697 	} s;
3698 	struct cvmx_lmcx_scramble_cfg0_s cn61xx;
3699 	struct cvmx_lmcx_scramble_cfg0_s cn66xx;
3700 	struct cvmx_lmcx_scramble_cfg0_s cn70xx;
3701 	struct cvmx_lmcx_scramble_cfg0_s cn70xxp1;
3702 	struct cvmx_lmcx_scramble_cfg0_s cn73xx;
3703 	struct cvmx_lmcx_scramble_cfg0_s cn78xx;
3704 	struct cvmx_lmcx_scramble_cfg0_s cn78xxp1;
3705 	struct cvmx_lmcx_scramble_cfg0_s cnf71xx;
3706 	struct cvmx_lmcx_scramble_cfg0_s cnf75xx;
3707 };
3708 
3709 /**
3710  * cvmx_lmc#_scramble_cfg1
3711  *
3712  * These registers set the aliasing that uses the lowest, legal chip select(s).
3713  *
3714  */
3715 union cvmx_lmcx_scramble_cfg1 {
3716 	u64 u64;
3717 	struct cvmx_lmcx_scramble_cfg1_s {
3718 		uint64_t key:64;
3719 	} s;
3720 	struct cvmx_lmcx_scramble_cfg1_s cn61xx;
3721 	struct cvmx_lmcx_scramble_cfg1_s cn66xx;
3722 	struct cvmx_lmcx_scramble_cfg1_s cn70xx;
3723 	struct cvmx_lmcx_scramble_cfg1_s cn70xxp1;
3724 	struct cvmx_lmcx_scramble_cfg1_s cn73xx;
3725 	struct cvmx_lmcx_scramble_cfg1_s cn78xx;
3726 	struct cvmx_lmcx_scramble_cfg1_s cn78xxp1;
3727 	struct cvmx_lmcx_scramble_cfg1_s cnf71xx;
3728 	struct cvmx_lmcx_scramble_cfg1_s cnf75xx;
3729 };
3730 
3731 /**
3732  * cvmx_lmc#_scramble_cfg2
3733  */
3734 union cvmx_lmcx_scramble_cfg2 {
3735 	u64 u64;
3736 	struct cvmx_lmcx_scramble_cfg2_s {
3737 		uint64_t key:64;
3738 	} s;
3739 	struct cvmx_lmcx_scramble_cfg2_s cn73xx;
3740 	struct cvmx_lmcx_scramble_cfg2_s cn78xx;
3741 	struct cvmx_lmcx_scramble_cfg2_s cnf75xx;
3742 };
3743 
3744 /**
3745  * cvmx_lmc#_scrambled_fadr
3746  *
3747  * LMC()_FADR captures the failing pre-scrambled address location (split into
3748  * DIMM, bunk, bank, etc). If scrambling is off, LMC()_FADR also captures the
3749  * failing physical location in the DRAM parts. LMC()_SCRAMBLED_FADR captures
3750  * the actual failing address location in the physical DRAM parts, i.e.:
3751  *
3752  * * If scrambling is on, LMC()_SCRAMBLED_FADR contains the failing physical
3753  * location in the
3754  * DRAM parts (split into DIMM, bunk, bank, etc).
3755  *
3756  * * If scrambling is off, the pre-scramble and post-scramble addresses are
3757  * the same, and so the
3758  * contents of LMC()_SCRAMBLED_FADR match the contents of LMC()_FADR.
3759  *
 * This register only captures the first transaction with ECC errors. A DED
 * error can overwrite this register with its failing addresses if the first
 * error was a SEC error. Writing LMC()_CONFIG[SEC_ERR, DED_ERR] clears the
 * error bits and captures the next failing address. If [FDIMM] is 1, the
 * error is in the higher DIMM.
3765  */
3766 union cvmx_lmcx_scrambled_fadr {
3767 	u64 u64;
3768 	struct cvmx_lmcx_scrambled_fadr_s {
3769 		uint64_t reserved_43_63:21;
3770 		uint64_t fcid:3;
3771 		uint64_t fill_order:2;
3772 		uint64_t reserved_14_37:24;
3773 		uint64_t fcol:14;
3774 	} s;
3775 	struct cvmx_lmcx_scrambled_fadr_cn61xx {
3776 		uint64_t reserved_36_63:28;
3777 		uint64_t fdimm:2;
3778 		uint64_t fbunk:1;
3779 		uint64_t fbank:3;
3780 		uint64_t frow:16;
3781 		uint64_t fcol:14;
3782 	} cn61xx;
3783 	struct cvmx_lmcx_scrambled_fadr_cn61xx cn66xx;
3784 	struct cvmx_lmcx_scrambled_fadr_cn70xx {
3785 		uint64_t reserved_40_63:24;
3786 		uint64_t fill_order:2;
3787 		uint64_t fdimm:1;
3788 		uint64_t fbunk:1;
3789 		uint64_t fbank:4;
3790 		uint64_t frow:18;
3791 		uint64_t fcol:14;
3792 	} cn70xx;
3793 	struct cvmx_lmcx_scrambled_fadr_cn70xx cn70xxp1;
3794 	struct cvmx_lmcx_scrambled_fadr_cn73xx {
3795 		uint64_t reserved_43_63:21;
3796 		uint64_t fcid:3;
3797 		uint64_t fill_order:2;
3798 		uint64_t fdimm:1;
3799 		uint64_t fbunk:1;
3800 		uint64_t fbank:4;
3801 		uint64_t frow:18;
3802 		uint64_t fcol:14;
3803 	} cn73xx;
3804 	struct cvmx_lmcx_scrambled_fadr_cn73xx cn78xx;
3805 	struct cvmx_lmcx_scrambled_fadr_cn73xx cn78xxp1;
3806 	struct cvmx_lmcx_scrambled_fadr_cn61xx cnf71xx;
3807 	struct cvmx_lmcx_scrambled_fadr_cn73xx cnf75xx;
3808 };
3809 
3810 /**
3811  * cvmx_lmc#_seq_ctl
3812  *
3813  * This register is used to initiate the various control sequences in the LMC.
3814  *
3815  */
3816 union cvmx_lmcx_seq_ctl {
3817 	u64 u64;
3818 	struct cvmx_lmcx_seq_ctl_s {
3819 		uint64_t reserved_6_63:58;
3820 		uint64_t seq_complete:1;
3821 		uint64_t seq_sel:4;
3822 		uint64_t init_start:1;
3823 	} s;
3824 	struct cvmx_lmcx_seq_ctl_s cn70xx;
3825 	struct cvmx_lmcx_seq_ctl_s cn70xxp1;
3826 	struct cvmx_lmcx_seq_ctl_s cn73xx;
3827 	struct cvmx_lmcx_seq_ctl_s cn78xx;
3828 	struct cvmx_lmcx_seq_ctl_s cn78xxp1;
3829 	struct cvmx_lmcx_seq_ctl_s cnf75xx;
3830 };
3831 
3832 /**
3833  * cvmx_lmc#_slot_ctl0
3834  *
3835  * This register is an assortment of control fields needed by the memory
3836  * controller. If software has not previously written to this register
3837  * (since the last DRESET), hardware updates the fields in this register to
3838  * the minimum allowed value when any of LMC()_RLEVEL_RANK(),
3839  * LMC()_WLEVEL_RANK(), LMC()_CONTROL, and LMC()_MODEREG_PARAMS0 registers
3840  * change. Ideally, only read this register after LMC has been initialized and
3841  * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
3842  *
3843  * The interpretation of the fields in this register depends on
3844  * LMC(0)_CONFIG[DDR2T]:
3845  *
3846  * * If LMC()_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
3847  * between when the DRAM part registers CAS commands of the first and
3848  * second types from different cache blocks.
3849  *
 * * If LMC()_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
3851  * between when the DRAM part registers CAS commands of the first and second
3852  * types from different cache blocks.
3853  * FieldValue = 0 is always illegal in this case.
3854  * The hardware-calculated minimums for these fields are shown in
3855  * LMC(0)_SLOT_CTL0 Hardware-Calculated Minimums.
3856  */
3857 union cvmx_lmcx_slot_ctl0 {
3858 	u64 u64;
3859 	struct cvmx_lmcx_slot_ctl0_s {
3860 		uint64_t reserved_50_63:14;
3861 		uint64_t w2r_l_init_ext:1;
3862 		uint64_t w2r_init_ext:1;
3863 		uint64_t w2w_l_init:6;
3864 		uint64_t w2r_l_init:6;
3865 		uint64_t r2w_l_init:6;
3866 		uint64_t r2r_l_init:6;
3867 		uint64_t w2w_init:6;
3868 		uint64_t w2r_init:6;
3869 		uint64_t r2w_init:6;
3870 		uint64_t r2r_init:6;
3871 	} s;
3872 	struct cvmx_lmcx_slot_ctl0_cn61xx {
3873 		uint64_t reserved_24_63:40;
3874 		uint64_t w2w_init:6;
3875 		uint64_t w2r_init:6;
3876 		uint64_t r2w_init:6;
3877 		uint64_t r2r_init:6;
3878 	} cn61xx;
3879 	struct cvmx_lmcx_slot_ctl0_cn61xx cn63xx;
3880 	struct cvmx_lmcx_slot_ctl0_cn61xx cn63xxp1;
3881 	struct cvmx_lmcx_slot_ctl0_cn61xx cn66xx;
3882 	struct cvmx_lmcx_slot_ctl0_cn61xx cn68xx;
3883 	struct cvmx_lmcx_slot_ctl0_cn61xx cn68xxp1;
3884 	struct cvmx_lmcx_slot_ctl0_cn70xx {
3885 		uint64_t reserved_48_63:16;
3886 		uint64_t w2w_l_init:6;
3887 		uint64_t w2r_l_init:6;
3888 		uint64_t r2w_l_init:6;
3889 		uint64_t r2r_l_init:6;
3890 		uint64_t w2w_init:6;
3891 		uint64_t w2r_init:6;
3892 		uint64_t r2w_init:6;
3893 		uint64_t r2r_init:6;
3894 	} cn70xx;
3895 	struct cvmx_lmcx_slot_ctl0_cn70xx cn70xxp1;
3896 	struct cvmx_lmcx_slot_ctl0_s cn73xx;
3897 	struct cvmx_lmcx_slot_ctl0_s cn78xx;
3898 	struct cvmx_lmcx_slot_ctl0_s cn78xxp1;
3899 	struct cvmx_lmcx_slot_ctl0_cn61xx cnf71xx;
3900 	struct cvmx_lmcx_slot_ctl0_s cnf75xx;
3901 };
3902 
3903 /**
3904  * cvmx_lmc#_slot_ctl1
3905  *
3906  * This register is an assortment of control fields needed by the memory
3907  * controller. If software has not previously written to this register
3908  * (since the last DRESET), hardware updates the fields in this register to
3909  * the minimum allowed value when any of LMC()_RLEVEL_RANK(),
3910  * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
3911  * Ideally, only read this register after LMC has been initialized and
3912  * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
3913  *
3914  * The interpretation of the fields in this CSR depends on
3915  * LMC(0)_CONFIG[DDR2T]:
3916  *
3917  * * If LMC()_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
3918  * between when the DRAM part registers CAS commands of the first and
3919  * second types from different cache blocks.
3920  *
3921  * * If LMC()_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
3922  * between when the DRAM part registers CAS commands of the first and
3923  * second types from different cache blocks.
3924  * FieldValue = 0 is always illegal in this case.
3925  *
3926  * The hardware-calculated minimums for these fields are shown in
3927  * LMC(0)_SLOT_CTL1 Hardware-Calculated Minimums.
3928  */
3929 union cvmx_lmcx_slot_ctl1 {
3930 	u64 u64;
3931 	struct cvmx_lmcx_slot_ctl1_s {
3932 		uint64_t reserved_24_63:40;
3933 		uint64_t w2w_xrank_init:6;
3934 		uint64_t w2r_xrank_init:6;
3935 		uint64_t r2w_xrank_init:6;
3936 		uint64_t r2r_xrank_init:6;
3937 	} s;
3938 	struct cvmx_lmcx_slot_ctl1_s cn61xx;
3939 	struct cvmx_lmcx_slot_ctl1_s cn63xx;
3940 	struct cvmx_lmcx_slot_ctl1_s cn63xxp1;
3941 	struct cvmx_lmcx_slot_ctl1_s cn66xx;
3942 	struct cvmx_lmcx_slot_ctl1_s cn68xx;
3943 	struct cvmx_lmcx_slot_ctl1_s cn68xxp1;
3944 	struct cvmx_lmcx_slot_ctl1_s cn70xx;
3945 	struct cvmx_lmcx_slot_ctl1_s cn70xxp1;
3946 	struct cvmx_lmcx_slot_ctl1_s cn73xx;
3947 	struct cvmx_lmcx_slot_ctl1_s cn78xx;
3948 	struct cvmx_lmcx_slot_ctl1_s cn78xxp1;
3949 	struct cvmx_lmcx_slot_ctl1_s cnf71xx;
3950 	struct cvmx_lmcx_slot_ctl1_s cnf75xx;
3951 };
3952 
3953 /**
3954  * cvmx_lmc#_slot_ctl2
3955  *
3956  * This register is an assortment of control fields needed by the memory
3957  * controller. If software has not previously written to this register
3958  * (since the last DRESET), hardware updates the fields in this register
3959  * to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
3960  * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
3961  * Ideally, only read this register after LMC has been initialized and
3962  * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
3963  *
3964  * The interpretation of the fields in this CSR depends on LMC(0)_CONFIG[DDR2T]:
3965  *
3966  * * If LMC()_CONFIG[DDR2T] = 1, (FieldValue + 4) is the minimum CK cycles
3967  * between when the DRAM part registers CAS commands of the first and
3968  * second types from different cache blocks.
3969  *
3970  * * If LMC()_CONFIG[DDR2T] = 0, (FieldValue + 3) is the minimum CK cycles
3971  * between when the DRAM part registers CAS commands of the first and second
3972  * types from different cache blocks.
3973  * FieldValue = 0 is always illegal in this case.
3974  *
3975  * The hardware-calculated minimums for these fields are shown in LMC Registers.
3976  */
3977 union cvmx_lmcx_slot_ctl2 {
3978 	u64 u64;
3979 	struct cvmx_lmcx_slot_ctl2_s {
3980 		uint64_t reserved_24_63:40;
3981 		uint64_t w2w_xdimm_init:6;
3982 		uint64_t w2r_xdimm_init:6;
3983 		uint64_t r2w_xdimm_init:6;
3984 		uint64_t r2r_xdimm_init:6;
3985 	} s;
3986 	struct cvmx_lmcx_slot_ctl2_s cn61xx;
3987 	struct cvmx_lmcx_slot_ctl2_s cn63xx;
3988 	struct cvmx_lmcx_slot_ctl2_s cn63xxp1;
3989 	struct cvmx_lmcx_slot_ctl2_s cn66xx;
3990 	struct cvmx_lmcx_slot_ctl2_s cn68xx;
3991 	struct cvmx_lmcx_slot_ctl2_s cn68xxp1;
3992 	struct cvmx_lmcx_slot_ctl2_s cn70xx;
3993 	struct cvmx_lmcx_slot_ctl2_s cn70xxp1;
3994 	struct cvmx_lmcx_slot_ctl2_s cn73xx;
3995 	struct cvmx_lmcx_slot_ctl2_s cn78xx;
3996 	struct cvmx_lmcx_slot_ctl2_s cn78xxp1;
3997 	struct cvmx_lmcx_slot_ctl2_s cnf71xx;
3998 	struct cvmx_lmcx_slot_ctl2_s cnf75xx;
3999 };
4000 
4001 /**
4002  * cvmx_lmc#_slot_ctl3
4003  *
4004  * This register is an assortment of control fields needed by the memory
4005  * controller. If software has not previously written to this register
4006  * (since the last DRESET), hardware updates the fields in this register
4007  * to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
4008  * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
4009  * Ideally, only read this register after LMC has been initialized and
4010  * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
4011  *
4012  * The interpretation of the fields in this CSR depends on LMC(0)_CONFIG[DDR2T]:
4013  *
4014  * * If LMC()_CONFIG[DDR2T] = 1, (FieldValue + 4) is the minimum CK cycles
4015  * between when the DRAM part registers CAS commands of the first and
4016  * second types from different cache blocks.
4017  *
4018  * * If LMC()_CONFIG[DDR2T] = 0, (FieldValue + 3) is the minimum CK cycles
4019  * between when the DRAM part registers CAS commands of the first and second
4020  * types from different cache blocks.
4021  * FieldValue = 0 is always illegal in this case.
4022  *
4023  * The hardware-calculated minimums for these fields are shown in LMC Registers.
4024  */
4025 union cvmx_lmcx_slot_ctl3 {
4026 	u64 u64;
4027 	struct cvmx_lmcx_slot_ctl3_s {
4028 		uint64_t reserved_50_63:14;
4029 		uint64_t w2r_l_xrank_init_ext:1;
4030 		uint64_t w2r_xrank_init_ext:1;
4031 		uint64_t w2w_l_xrank_init:6;
4032 		uint64_t w2r_l_xrank_init:6;
4033 		uint64_t r2w_l_xrank_init:6;
4034 		uint64_t r2r_l_xrank_init:6;
4035 		uint64_t w2w_xrank_init:6;
4036 		uint64_t w2r_xrank_init:6;
4037 		uint64_t r2w_xrank_init:6;
4038 		uint64_t r2r_xrank_init:6;
4039 	} s;
4040 	struct cvmx_lmcx_slot_ctl3_s cn73xx;
4041 	struct cvmx_lmcx_slot_ctl3_s cn78xx;
4042 	struct cvmx_lmcx_slot_ctl3_s cnf75xx;
4043 };
4044 
4045 /**
4046  * cvmx_lmc#_timing_params0
4047  */
4048 union cvmx_lmcx_timing_params0 {
4049 	u64 u64;
4050 	struct cvmx_lmcx_timing_params0_s {
4051 		uint64_t reserved_54_63:10;
4052 		uint64_t tbcw:6;
4053 		uint64_t reserved_26_47:22;
4054 		uint64_t tmrd:4;
4055 		uint64_t reserved_8_21:14;
4056 		uint64_t tckeon:8;
4057 	} s;
4058 	struct cvmx_lmcx_timing_params0_cn61xx {
4059 		uint64_t reserved_47_63:17;
4060 		uint64_t trp_ext:1;
4061 		uint64_t tcksre:4;
4062 		uint64_t trp:4;
4063 		uint64_t tzqinit:4;
4064 		uint64_t tdllk:4;
4065 		uint64_t tmod:4;
4066 		uint64_t tmrd:4;
4067 		uint64_t txpr:4;
4068 		uint64_t tcke:4;
4069 		uint64_t tzqcs:4;
4070 		uint64_t reserved_0_9:10;
4071 	} cn61xx;
4072 	struct cvmx_lmcx_timing_params0_cn61xx cn63xx;
4073 	struct cvmx_lmcx_timing_params0_cn63xxp1 {
4074 		uint64_t reserved_46_63:18;
4075 		uint64_t tcksre:4;
4076 		uint64_t trp:4;
4077 		uint64_t tzqinit:4;
4078 		uint64_t tdllk:4;
4079 		uint64_t tmod:4;
4080 		uint64_t tmrd:4;
4081 		uint64_t txpr:4;
4082 		uint64_t tcke:4;
4083 		uint64_t tzqcs:4;
4084 		uint64_t tckeon:10;
4085 	} cn63xxp1;
4086 	struct cvmx_lmcx_timing_params0_cn61xx cn66xx;
4087 	struct cvmx_lmcx_timing_params0_cn61xx cn68xx;
4088 	struct cvmx_lmcx_timing_params0_cn61xx cn68xxp1;
4089 	struct cvmx_lmcx_timing_params0_cn70xx {
4090 		uint64_t reserved_48_63:16;
4091 		uint64_t tcksre:4;
4092 		uint64_t trp:5;
4093 		uint64_t tzqinit:4;
4094 		uint64_t tdllk:4;
4095 		uint64_t tmod:5;
4096 		uint64_t tmrd:4;
4097 		uint64_t txpr:6;
4098 		uint64_t tcke:4;
4099 		uint64_t tzqcs:4;
4100 		uint64_t reserved_0_7:8;
4101 	} cn70xx;
4102 	struct cvmx_lmcx_timing_params0_cn70xx cn70xxp1;
4103 	struct cvmx_lmcx_timing_params0_cn73xx {
4104 		uint64_t reserved_54_63:10;
4105 		uint64_t tbcw:6;
4106 		uint64_t tcksre:4;
4107 		uint64_t trp:5;
4108 		uint64_t tzqinit:4;
4109 		uint64_t tdllk:4;
4110 		uint64_t tmod:5;
4111 		uint64_t tmrd:4;
4112 		uint64_t txpr:6;
4113 		uint64_t tcke:4;
4114 		uint64_t tzqcs:4;
4115 		uint64_t reserved_0_7:8;
4116 	} cn73xx;
4117 	struct cvmx_lmcx_timing_params0_cn73xx cn78xx;
4118 	struct cvmx_lmcx_timing_params0_cn73xx cn78xxp1;
4119 	struct cvmx_lmcx_timing_params0_cn61xx cnf71xx;
4120 	struct cvmx_lmcx_timing_params0_cn73xx cnf75xx;
4121 };
4122 
4123 /**
4124  * cvmx_lmc#_timing_params1
4125  */
4126 union cvmx_lmcx_timing_params1 {
4127 	u64 u64;
4128 	struct cvmx_lmcx_timing_params1_s {
4129 		uint64_t reserved_59_63:5;
4130 		uint64_t txp_ext:1;
4131 		uint64_t trcd_ext:1;
4132 		uint64_t tpdm_full_cycle_ena:1;
4133 		uint64_t trfc_dlr:7;
4134 		uint64_t reserved_4_48:45;
4135 		uint64_t tmprr:4;
4136 	} s;
4137 	struct cvmx_lmcx_timing_params1_cn61xx {
4138 		uint64_t reserved_47_63:17;
4139 		uint64_t tras_ext:1;
4140 		uint64_t txpdll:5;
4141 		uint64_t tfaw:5;
4142 		uint64_t twldqsen:4;
4143 		uint64_t twlmrd:4;
4144 		uint64_t txp:3;
4145 		uint64_t trrd:3;
4146 		uint64_t trfc:5;
4147 		uint64_t twtr:4;
4148 		uint64_t trcd:4;
4149 		uint64_t tras:5;
4150 		uint64_t tmprr:4;
4151 	} cn61xx;
4152 	struct cvmx_lmcx_timing_params1_cn61xx cn63xx;
4153 	struct cvmx_lmcx_timing_params1_cn63xxp1 {
4154 		uint64_t reserved_46_63:18;
4155 		uint64_t txpdll:5;
4156 		uint64_t tfaw:5;
4157 		uint64_t twldqsen:4;
4158 		uint64_t twlmrd:4;
4159 		uint64_t txp:3;
4160 		uint64_t trrd:3;
4161 		uint64_t trfc:5;
4162 		uint64_t twtr:4;
4163 		uint64_t trcd:4;
4164 		uint64_t tras:5;
4165 		uint64_t tmprr:4;
4166 	} cn63xxp1;
4167 	struct cvmx_lmcx_timing_params1_cn61xx cn66xx;
4168 	struct cvmx_lmcx_timing_params1_cn61xx cn68xx;
4169 	struct cvmx_lmcx_timing_params1_cn61xx cn68xxp1;
4170 	struct cvmx_lmcx_timing_params1_cn70xx {
4171 		uint64_t reserved_49_63:15;
4172 		uint64_t txpdll:5;
4173 		uint64_t tfaw:5;
4174 		uint64_t twldqsen:4;
4175 		uint64_t twlmrd:4;
4176 		uint64_t txp:3;
4177 		uint64_t trrd:3;
4178 		uint64_t trfc:7;
4179 		uint64_t twtr:4;
4180 		uint64_t trcd:4;
4181 		uint64_t tras:6;
4182 		uint64_t tmprr:4;
4183 	} cn70xx;
4184 	struct cvmx_lmcx_timing_params1_cn70xx cn70xxp1;
4185 	struct cvmx_lmcx_timing_params1_cn73xx {
4186 		uint64_t reserved_59_63:5;
4187 		uint64_t txp_ext:1;
4188 		uint64_t trcd_ext:1;
4189 		uint64_t tpdm_full_cycle_ena:1;
4190 		uint64_t trfc_dlr:7;
4191 		uint64_t txpdll:5;
4192 		uint64_t tfaw:5;
4193 		uint64_t twldqsen:4;
4194 		uint64_t twlmrd:4;
4195 		uint64_t txp:3;
4196 		uint64_t trrd:3;
4197 		uint64_t trfc:7;
4198 		uint64_t twtr:4;
4199 		uint64_t trcd:4;
4200 		uint64_t tras:6;
4201 		uint64_t tmprr:4;
4202 	} cn73xx;
4203 	struct cvmx_lmcx_timing_params1_cn73xx cn78xx;
4204 	struct cvmx_lmcx_timing_params1_cn73xx cn78xxp1;
4205 	struct cvmx_lmcx_timing_params1_cn61xx cnf71xx;
4206 	struct cvmx_lmcx_timing_params1_cn73xx cnf75xx;
4207 };
4208 
4209 /**
4210  * cvmx_lmc#_timing_params2
4211  *
4212  * This register sets timing parameters for DDR4.
4213  *
4214  */
4215 union cvmx_lmcx_timing_params2 {
4216 	u64 u64;
4217 	struct cvmx_lmcx_timing_params2_s {
4218 		uint64_t reserved_16_63:48;
4219 		uint64_t trrd_l_ext:1;
4220 		uint64_t trtp:4;
4221 		uint64_t t_rw_op_max:4;
4222 		uint64_t twtr_l:4;
4223 		uint64_t trrd_l:3;
4224 	} s;
4225 	struct cvmx_lmcx_timing_params2_cn70xx {
4226 		uint64_t reserved_15_63:49;
4227 		uint64_t trtp:4;
4228 		uint64_t t_rw_op_max:4;
4229 		uint64_t twtr_l:4;
4230 		uint64_t trrd_l:3;
4231 	} cn70xx;
4232 	struct cvmx_lmcx_timing_params2_cn70xx cn70xxp1;
4233 	struct cvmx_lmcx_timing_params2_s cn73xx;
4234 	struct cvmx_lmcx_timing_params2_s cn78xx;
4235 	struct cvmx_lmcx_timing_params2_s cn78xxp1;
4236 	struct cvmx_lmcx_timing_params2_s cnf75xx;
4237 };
4238 
4239 /**
4240  * cvmx_lmc#_tro_ctl
4241  *
4242  * LMC_TRO_CTL = LMC Temperature Ring Osc Control
 * This register is an assortment of the control fields needed to control
 * the temperature ring oscillator.
 *
 * Notes:
 * To bring up the temperature ring oscillator, write TRESET to 0, then
 * initialize RCLK_CNT to the desired value.
4249  */
4250 union cvmx_lmcx_tro_ctl {
4251 	u64 u64;
4252 	struct cvmx_lmcx_tro_ctl_s {
4253 		uint64_t reserved_33_63:31;
4254 		uint64_t rclk_cnt:32;
4255 		uint64_t treset:1;
4256 	} s;
4257 	struct cvmx_lmcx_tro_ctl_s cn61xx;
4258 	struct cvmx_lmcx_tro_ctl_s cn63xx;
4259 	struct cvmx_lmcx_tro_ctl_s cn63xxp1;
4260 	struct cvmx_lmcx_tro_ctl_s cn66xx;
4261 	struct cvmx_lmcx_tro_ctl_s cn68xx;
4262 	struct cvmx_lmcx_tro_ctl_s cn68xxp1;
4263 	struct cvmx_lmcx_tro_ctl_s cnf71xx;
4264 };
4265 
4266 /**
4267  * cvmx_lmc#_tro_stat
4268  *
4269  * LMC_TRO_STAT = LMC Temperature Ring Osc Status
 * This register reports the status (ring count) of the temperature ring
 * oscillator.
4272  */
4273 union cvmx_lmcx_tro_stat {
4274 	u64 u64;
4275 	struct cvmx_lmcx_tro_stat_s {
4276 		uint64_t reserved_32_63:32;
4277 		uint64_t ring_cnt:32;
4278 	} s;
4279 	struct cvmx_lmcx_tro_stat_s cn61xx;
4280 	struct cvmx_lmcx_tro_stat_s cn63xx;
4281 	struct cvmx_lmcx_tro_stat_s cn63xxp1;
4282 	struct cvmx_lmcx_tro_stat_s cn66xx;
4283 	struct cvmx_lmcx_tro_stat_s cn68xx;
4284 	struct cvmx_lmcx_tro_stat_s cn68xxp1;
4285 	struct cvmx_lmcx_tro_stat_s cnf71xx;
4286 };
4287 
4288 /**
4289  * cvmx_lmc#_wlevel_ctl
4290  */
4291 union cvmx_lmcx_wlevel_ctl {
4292 	u64 u64;
4293 	struct cvmx_lmcx_wlevel_ctl_s {
4294 		uint64_t reserved_22_63:42;
4295 		uint64_t rtt_nom:3;
4296 		uint64_t bitmask:8;
4297 		uint64_t or_dis:1;
4298 		uint64_t sset:1;
4299 		uint64_t lanemask:9;
4300 	} s;
4301 	struct cvmx_lmcx_wlevel_ctl_s cn61xx;
4302 	struct cvmx_lmcx_wlevel_ctl_s cn63xx;
4303 	struct cvmx_lmcx_wlevel_ctl_cn63xxp1 {
4304 		uint64_t reserved_10_63:54;
4305 		uint64_t sset:1;
4306 		uint64_t lanemask:9;
4307 	} cn63xxp1;
4308 	struct cvmx_lmcx_wlevel_ctl_s cn66xx;
4309 	struct cvmx_lmcx_wlevel_ctl_s cn68xx;
4310 	struct cvmx_lmcx_wlevel_ctl_s cn68xxp1;
4311 	struct cvmx_lmcx_wlevel_ctl_s cn70xx;
4312 	struct cvmx_lmcx_wlevel_ctl_s cn70xxp1;
4313 	struct cvmx_lmcx_wlevel_ctl_s cn73xx;
4314 	struct cvmx_lmcx_wlevel_ctl_s cn78xx;
4315 	struct cvmx_lmcx_wlevel_ctl_s cn78xxp1;
4316 	struct cvmx_lmcx_wlevel_ctl_s cnf71xx;
4317 	struct cvmx_lmcx_wlevel_ctl_s cnf75xx;
4318 };
4319 
4320 /**
4321  * cvmx_lmc#_wlevel_dbg
4322  *
 * A given read of LMC()_WLEVEL_DBG returns the write leveling pass/fail
4324  * results for all possible delay settings (i.e. the BITMASK) for only one
4325  * byte in the last rank that the hardware write leveled.
4326  * LMC()_WLEVEL_DBG[BYTE] selects the particular byte. To get these
4327  * pass/fail results for a different rank, you must run the hardware write
4328  * leveling again. For example, it is possible to get the [BITMASK] results
4329  * for every byte of every rank if you run write leveling separately for
4330  * each rank, probing LMC()_WLEVEL_DBG between each write-leveling.
4331  */
4332 union cvmx_lmcx_wlevel_dbg {
4333 	u64 u64;
4334 	struct cvmx_lmcx_wlevel_dbg_s {
4335 		uint64_t reserved_12_63:52;
4336 		uint64_t bitmask:8;
4337 		uint64_t byte:4;
4338 	} s;
4339 	struct cvmx_lmcx_wlevel_dbg_s cn61xx;
4340 	struct cvmx_lmcx_wlevel_dbg_s cn63xx;
4341 	struct cvmx_lmcx_wlevel_dbg_s cn63xxp1;
4342 	struct cvmx_lmcx_wlevel_dbg_s cn66xx;
4343 	struct cvmx_lmcx_wlevel_dbg_s cn68xx;
4344 	struct cvmx_lmcx_wlevel_dbg_s cn68xxp1;
4345 	struct cvmx_lmcx_wlevel_dbg_s cn70xx;
4346 	struct cvmx_lmcx_wlevel_dbg_s cn70xxp1;
4347 	struct cvmx_lmcx_wlevel_dbg_s cn73xx;
4348 	struct cvmx_lmcx_wlevel_dbg_s cn78xx;
4349 	struct cvmx_lmcx_wlevel_dbg_s cn78xxp1;
4350 	struct cvmx_lmcx_wlevel_dbg_s cnf71xx;
4351 	struct cvmx_lmcx_wlevel_dbg_s cnf75xx;
4352 };
4353 
4354 /**
4355  * cvmx_lmc#_wlevel_rank#
4356  *
4357  * Four of these CSRs exist per LMC, one for each rank. Write level setting
4358  * is measured in units of 1/8 CK, so the below BYTEn values can range over
4359  * 4 CK cycles. Assuming LMC()_WLEVEL_CTL[SSET]=0, the BYTEn<2:0> values are
4360  * not used during write leveling, and they are overwritten by the hardware
4361  * as part of the write leveling sequence. (Hardware sets [STATUS] to 3 after
4362  * hardware write leveling completes for the rank). Software needs to set
4363  * BYTEn<4:3> bits.
4364  *
4365  * Each CSR may also be written by software, but not while a write leveling
4366  * sequence is in progress. (Hardware sets [STATUS] to 1 after a CSR write.)
4367  * Software initiates a hardware write-leveling sequence by programming
4368  * LMC()_WLEVEL_CTL and writing RANKMASK and INIT_START=1 with SEQ_SEL=6 in
 * LMC()_CONFIG.
4370  *
 * LMC will then step through and accumulate write-leveling results for 8
 * unique delay settings (twice), starting at a delay of
 * LMC()_WLEVEL_RANK()[BYTEn<4:3>] * 8 CK and increasing by 1/8 CK each
 * setting. Hardware will then
4374  * set LMC()_WLEVEL_RANK()[BYTEn<2:0>] to indicate the first write leveling
4375  * result of 1 that followed a result of 0 during the sequence by searching
4376  * for a '1100' pattern in the generated bitmask, except that LMC will always
4377  * write LMC()_WLEVEL_RANK()[BYTEn<0>]=0. If hardware is unable to find a match
4378  * for a '1100' pattern, then hardware sets LMC()_WLEVEL_RANK() [BYTEn<2:0>]
4379  * to 0x4. See LMC()_WLEVEL_CTL.
4380  *
4381  * LMC()_WLEVEL_RANKi values for ranks i without attached DRAM should be set
4382  * such that they do not increase the range of possible BYTE values for any
4383  * byte lane. The easiest way to do this is to set LMC()_WLEVEL_RANKi =
4384  * LMC()_WLEVEL_RANKj, where j is some rank with attached DRAM whose
4385  * LMC()_WLEVEL_RANKj is already fully initialized.
4386  */
4387 union cvmx_lmcx_wlevel_rankx {
4388 	u64 u64;
4389 	struct cvmx_lmcx_wlevel_rankx_s {
4390 		uint64_t reserved_47_63:17;
4391 		uint64_t status:2;
4392 		uint64_t byte8:5;
4393 		uint64_t byte7:5;
4394 		uint64_t byte6:5;
4395 		uint64_t byte5:5;
4396 		uint64_t byte4:5;
4397 		uint64_t byte3:5;
4398 		uint64_t byte2:5;
4399 		uint64_t byte1:5;
4400 		uint64_t byte0:5;
4401 	} s;
4402 	struct cvmx_lmcx_wlevel_rankx_s cn61xx;
4403 	struct cvmx_lmcx_wlevel_rankx_s cn63xx;
4404 	struct cvmx_lmcx_wlevel_rankx_s cn63xxp1;
4405 	struct cvmx_lmcx_wlevel_rankx_s cn66xx;
4406 	struct cvmx_lmcx_wlevel_rankx_s cn68xx;
4407 	struct cvmx_lmcx_wlevel_rankx_s cn68xxp1;
4408 	struct cvmx_lmcx_wlevel_rankx_s cn70xx;
4409 	struct cvmx_lmcx_wlevel_rankx_s cn70xxp1;
4410 	struct cvmx_lmcx_wlevel_rankx_s cn73xx;
4411 	struct cvmx_lmcx_wlevel_rankx_s cn78xx;
4412 	struct cvmx_lmcx_wlevel_rankx_s cn78xxp1;
4413 	struct cvmx_lmcx_wlevel_rankx_s cnf71xx;
4414 	struct cvmx_lmcx_wlevel_rankx_s cnf75xx;
4415 };
4416 
4417 /**
4418  * cvmx_lmc#_wodt_ctl0
4419  *
4420  * LMC_WODT_CTL0 = LMC Write OnDieTermination control
4421  * See the description in LMC_WODT_CTL1.
4422  *
4423  * Notes:
4424  * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write
4425  * ODT mask.  See LMC_WODT_CTL1.
4426  *
4427  */
4428 union cvmx_lmcx_wodt_ctl0 {
4429 	u64 u64;
4430 	struct cvmx_lmcx_wodt_ctl0_s {
4431 		uint64_t reserved_0_63:64;
4432 	} s;
4433 	struct cvmx_lmcx_wodt_ctl0_cn30xx {
4434 		uint64_t reserved_32_63:32;
4435 		uint64_t wodt_d1_r1:8;
4436 		uint64_t wodt_d1_r0:8;
4437 		uint64_t wodt_d0_r1:8;
4438 		uint64_t wodt_d0_r0:8;
4439 	} cn30xx;
4440 	struct cvmx_lmcx_wodt_ctl0_cn30xx cn31xx;
4441 	struct cvmx_lmcx_wodt_ctl0_cn38xx {
4442 		uint64_t reserved_32_63:32;
4443 		uint64_t wodt_hi3:4;
4444 		uint64_t wodt_hi2:4;
4445 		uint64_t wodt_hi1:4;
4446 		uint64_t wodt_hi0:4;
4447 		uint64_t wodt_lo3:4;
4448 		uint64_t wodt_lo2:4;
4449 		uint64_t wodt_lo1:4;
4450 		uint64_t wodt_lo0:4;
4451 	} cn38xx;
4452 	struct cvmx_lmcx_wodt_ctl0_cn38xx cn38xxp2;
4453 	struct cvmx_lmcx_wodt_ctl0_cn38xx cn50xx;
4454 	struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xx;
4455 	struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xxp1;
4456 	struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xx;
4457 	struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xxp1;
4458 	struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xx;
4459 	struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xxp1;
4460 };
4461 
4462 /**
4463  * cvmx_lmc#_wodt_ctl1
4464  *
4465  * LMC_WODT_CTL1 = LMC Write OnDieTermination control
 * System designers may desire to terminate DQ/DQS/DM lines for
 * higher-frequency DDR operations (667 MHz and faster), especially on a
 * multi-rank system. DDR2 DQ/DM/DQS I/Os have built-in termination
 * resistors that can be turned on or off by the controller, after meeting
 * tAOND and tAOF timing requirements. Each rank has its own ODT pin that
 * fans out to all the memory parts in that DIMM. System designers may
 * prefer different combinations of ODT ONs for reads and writes into
 * different ranks. Octeon supports full programmability by way of the mask
 * register below. Each rank position has its own 8-bit programmable field.
 * When the controller does a write to that rank, it sets the 8 ODT pins to
 * the MASK pins below. For example, when doing a write into Rank0, a
 * system designer may desire to terminate the lines with the resistor on
 * DIMM0/Rank1. The mask WODT_D0_R0 would then be [00000010]. If the ODT
 * feature is not desired, the DDR parts can be programmed to not look at
 * these pins by writing 0 in QS_DIC. Octeon drives the appropriate mask
 * values on the ODT pins by default. If this feature is not required,
 * write 0 in this register.
4483  *
4484  * Notes:
4485  * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write
4486  * ODT mask. When a given RANK is selected, the WODT mask for that RANK
4487  * is used.  The resulting WODT mask is driven to the DIMMs in the following
4488  * manner:
4489  *            BUNK_ENA=1     BUNK_ENA=0
4490  * Mask[7] -> DIMM3, RANK1    DIMM3
4491  * Mask[6] -> DIMM3, RANK0
4492  * Mask[5] -> DIMM2, RANK1    DIMM2
4493  * Mask[4] -> DIMM2, RANK0
4494  * Mask[3] -> DIMM1, RANK1    DIMM1
4495  * Mask[2] -> DIMM1, RANK0
4496  * Mask[1] -> DIMM0, RANK1    DIMM0
4497  * Mask[0] -> DIMM0, RANK0
4498  */
4499 union cvmx_lmcx_wodt_ctl1 {
4500 	u64 u64;
4501 	struct cvmx_lmcx_wodt_ctl1_s {
4502 		uint64_t reserved_32_63:32;
4503 		uint64_t wodt_d3_r1:8;
4504 		uint64_t wodt_d3_r0:8;
4505 		uint64_t wodt_d2_r1:8;
4506 		uint64_t wodt_d2_r0:8;
4507 	} s;
4508 	struct cvmx_lmcx_wodt_ctl1_s cn30xx;
4509 	struct cvmx_lmcx_wodt_ctl1_s cn31xx;
4510 	struct cvmx_lmcx_wodt_ctl1_s cn52xx;
4511 	struct cvmx_lmcx_wodt_ctl1_s cn52xxp1;
4512 	struct cvmx_lmcx_wodt_ctl1_s cn56xx;
4513 	struct cvmx_lmcx_wodt_ctl1_s cn56xxp1;
4514 };
4515 
4516 /**
4517  * cvmx_lmc#_wodt_mask
4518  *
4519  * System designers may desire to terminate DQ/DQS lines for higher-frequency
4520  * DDR operations, especially on a multirank system. DDR3 DQ/DQS I/Os have
4521  * built-in termination resistors that can be turned on or off by the
4522  * controller, after meeting TAOND and TAOF timing requirements. Each rank
4523  * has its own ODT pin that fans out to all of the memory parts in that DIMM.
4524  * System designers may prefer different combinations of ODT ONs for write
4525  * operations into different ranks. CNXXXX supports full programmability by
4526  * way of the mask register below. Each rank position has its own 8-bit
4527  * programmable field. When the controller does a write to that rank,
4528  * it sets the four ODT pins to the mask pins below. For example, when
4529  * doing a write into Rank0, a system designer may desire to terminate the
4530  * lines with the resistor on DIMM0/Rank1. The mask [WODT_D0_R0] would then
4531  * be [00000010].
4532  *
4533  * CNXXXX drives the appropriate mask values on the ODT pins by default.
4534  * If this feature is not required, write 0x0 in this register. When a
4535  * given RANK is selected, the WODT mask for that RANK is used. The
4536  * resulting WODT mask is driven to the DIMMs in the following manner:
4537  */
4538 union cvmx_lmcx_wodt_mask {
4539 	u64 u64;
4540 	struct cvmx_lmcx_wodt_mask_s {
4541 		uint64_t wodt_d3_r1:8;
4542 		uint64_t wodt_d3_r0:8;
4543 		uint64_t wodt_d2_r1:8;
4544 		uint64_t wodt_d2_r0:8;
4545 		uint64_t wodt_d1_r1:8;
4546 		uint64_t wodt_d1_r0:8;
4547 		uint64_t wodt_d0_r1:8;
4548 		uint64_t wodt_d0_r0:8;
4549 	} s;
4550 	struct cvmx_lmcx_wodt_mask_s cn61xx;
4551 	struct cvmx_lmcx_wodt_mask_s cn63xx;
4552 	struct cvmx_lmcx_wodt_mask_s cn63xxp1;
4553 	struct cvmx_lmcx_wodt_mask_s cn66xx;
4554 	struct cvmx_lmcx_wodt_mask_s cn68xx;
4555 	struct cvmx_lmcx_wodt_mask_s cn68xxp1;
4556 	struct cvmx_lmcx_wodt_mask_cn70xx {
4557 		uint64_t reserved_28_63:36;
4558 		uint64_t wodt_d1_r1:4;
4559 		uint64_t reserved_20_23:4;
4560 		uint64_t wodt_d1_r0:4;
4561 		uint64_t reserved_12_15:4;
4562 		uint64_t wodt_d0_r1:4;
4563 		uint64_t reserved_4_7:4;
4564 		uint64_t wodt_d0_r0:4;
4565 	} cn70xx;
4566 	struct cvmx_lmcx_wodt_mask_cn70xx cn70xxp1;
4567 	struct cvmx_lmcx_wodt_mask_cn70xx cn73xx;
4568 	struct cvmx_lmcx_wodt_mask_cn70xx cn78xx;
4569 	struct cvmx_lmcx_wodt_mask_cn70xx cn78xxp1;
4570 	struct cvmx_lmcx_wodt_mask_s cnf71xx;
4571 	struct cvmx_lmcx_wodt_mask_cn70xx cnf75xx;
4572 };
4573 
4574 #endif
4575