1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020 Marvell International Ltd.
4  */
5 
6 #include <command.h>
7 #include <config.h>
8 #include <dm.h>
9 #include <hang.h>
10 #include <i2c.h>
11 #include <ram.h>
12 #include <time.h>
13 #include <asm/global_data.h>
14 
15 #include <asm/sections.h>
16 #include <linux/io.h>
17 
18 #include <mach/octeon_ddr.h>
19 
20 #define CONFIG_REF_HERTZ	50000000
21 
22 DECLARE_GLOBAL_DATA_PTR;
23 
24 /* Sign bit of an integer: 1 if negative, 0 otherwise */
25 static s64 _sign(s64 v)
26 {
27 	return (v < 0);
28 }
29 
30 #ifndef DDR_NO_DEBUG
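/*
 * lookup_env()/lookup_env_ull() build a parameter name with printf-style
 * formatting and look it up in the DDR debug environment.  When the
 * variable exists its value is reported and the raw string is returned,
 * so the caller can parse and apply the override, e.g.:
 *
 *	s = lookup_env(priv, "ddr_pll_clkr");
 *	if (s)
 *		best_clkr = simple_strtoul(s, NULL, 0);
 */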
31 char *lookup_env(struct ddr_priv *priv, const char *format, ...)
32 {
33 	char *s;
34 	unsigned long value;
35 	va_list args;
36 	char buffer[64];
37 
38 	va_start(args, format);
39 	vsnprintf(buffer, sizeof(buffer), format, args);
40 	va_end(args);
41 
42 	s = ddr_getenv_debug(priv, buffer);
43 	if (s) {
44 		value = simple_strtoul(s, NULL, 0);
45 		printf("Parameter found in environment %s=\"%s\" 0x%lx (%ld)\n",
46 		       buffer, s, value, value);
47 	}
48 
49 	return s;
50 }
51 
52 char *lookup_env_ull(struct ddr_priv *priv, const char *format, ...)
53 {
54 	char *s;
55 	u64 value;
56 	va_list args;
57 	char buffer[64];
58 
59 	va_start(args, format);
60 	vsnprintf(buffer, sizeof(buffer), format, args);
61 	va_end(args);
62 
63 	s = ddr_getenv_debug(priv, buffer);
64 	if (s) {
65 		value = simple_strtoull(s, NULL, 0);
66 		printf("Parameter found in environment. %s = 0x%016llx\n",
67 		       buffer, value);
68 	}
69 
70 	return s;
71 }
72 #else
73 char *lookup_env(struct ddr_priv *priv, const char *format, ...)
74 {
75 	return NULL;
76 }
77 
78 char *lookup_env_ull(struct ddr_priv *priv, const char *format, ...)
79 {
80 	return NULL;
81 }
82 #endif
83 
84 /* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
85 #define CVMX_L2C_TADS  ((OCTEON_IS_MODEL(OCTEON_CN68XX) ||		\
86 			 OCTEON_IS_MODEL(OCTEON_CN73XX) ||		\
87 			 OCTEON_IS_MODEL(OCTEON_CNF75XX)) ? 4 :		\
88 			(OCTEON_IS_MODEL(OCTEON_CN78XX)) ? 8 : 1)
89 
90 /* Number of L2C IOBs connected to LMC. */
91 #define CVMX_L2C_IOBS  ((OCTEON_IS_MODEL(OCTEON_CN68XX) ||		\
92 			 OCTEON_IS_MODEL(OCTEON_CN78XX) ||		\
93 			 OCTEON_IS_MODEL(OCTEON_CN73XX) ||		\
94 			 OCTEON_IS_MODEL(OCTEON_CNF75XX)) ? 2 : 1)
95 
96 #define CVMX_L2C_MAX_MEMSZ_ALLOWED (OCTEON_IS_OCTEON2() ?		\
97 				    (32 * CVMX_L2C_TADS) :		\
98 				    (OCTEON_IS_MODEL(OCTEON_CN70XX) ?	\
99 				     512 : (OCTEON_IS_OCTEON3() ? 1024 : 0)))
100 
101 /**
102  * Initialize the BIG address in L2C+DRAM to generate a proper error
103  * on reads/writes to a non-existent memory location.
104  *
105  * @param priv      Pointer to the DDR driver private data
106  * @param mem_size  Amount of DRAM configured in MB.
107  * @param mode      Allow/Disallow reporting errors L2C_INT_SUM[BIGRD,BIGWR].
108  */
109 static void cvmx_l2c_set_big_size(struct ddr_priv *priv, u64 mem_size, int mode)
110 {
111 	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) &&
112 	    !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
113 		union cvmx_l2c_big_ctl big_ctl;
114 		int bits = 0, zero_bits = 0;
115 		u64 mem;
116 
117 		if (mem_size > (CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024ull)) {
118 			printf("WARNING: Invalid memory size(%lld) requested, should be <= %lld\n",
119 			       mem_size,
120 			       (u64)CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024);
121 			mem_size = CVMX_L2C_MAX_MEMSZ_ALLOWED * 1024;
122 		}
123 
124 		mem = mem_size;
125 		while (mem) {
126 			if ((mem & 1) == 0)
127 				zero_bits++;
128 			bits++;
129 			mem >>= 1;
130 		}
131 
132 		if ((bits - zero_bits) != 1 || (bits - 9) <= 0) {
133 			printf("ERROR: Invalid DRAM size (%lld) requested, refer to L2C_BIG_CTL[maxdram] for valid options.\n",
134 			       mem_size);
135 			return;
136 		}
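		/*
		 * At this point mem_size is known to be a power of two of
		 * at least 512 MB, so (bits - 9) maps directly onto
		 * L2C_BIG_CTL[MAXDRAM].  For example, a 4 GB (4096 MB)
		 * configuration gives bits = 13 and MAXDRAM = 4.
		 */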
137 
138 		/*
139 		 * The BIG/HOLE logic is not supported in pass1, as per
140 		 * Errata L2C-17736
141 		 */
142 		if (mode == 0 && OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
143 			mode = 1;
144 
145 		big_ctl.u64 = 0;
146 		big_ctl.s.maxdram = bits - 9;
147 		big_ctl.cn61xx.disable = mode;
148 		l2c_wr(priv, CVMX_L2C_BIG_CTL_REL, big_ctl.u64);
149 	}
150 }
151 
152 static u32 octeon3_refclock(u32 alt_refclk, u32 ddr_hertz,
153 			    struct dimm_config *dimm_config)
154 {
155 	u32 ddr_ref_hertz = CONFIG_REF_HERTZ;
156 	int ddr_type;
157 	int spd_dimm_type;
158 
159 	debug("%s(%u, %u, %p)\n", __func__, alt_refclk, ddr_hertz, dimm_config);
160 
161 	/* Octeon 3 case... */
162 
163 	/* We know up front whether the alternate refclk is always wanted,
164 	 * and whether 2133 MT/s operation is desired.
165 	 * If the alternate refclk is not forced, probe the DDR and
166 	 * DIMM type: for DDR4 RDIMMs set the desired refclk
167 	 * to 100 MHz, otherwise keep the default (50 MHz).
168 	 * Rely on ddr_initialize() to do the final refclk selection
169 	 * and validation.
170 	 */
171 	if (alt_refclk) {
172 		/*
173 		 * If alternate refclk was specified, let it override
174 		 * everything
175 		 */
176 		ddr_ref_hertz = alt_refclk * 1000000;
177 		printf("%s: DRAM init: %d MHz refclk is REQUESTED ALWAYS\n",
178 		       __func__, alt_refclk);
179 	} else if (ddr_hertz > 1000000000) {
180 		ddr_type = get_ddr_type(dimm_config, 0);
181 		spd_dimm_type = get_dimm_module_type(dimm_config, 0, ddr_type);
182 
183 		debug("ddr type: 0x%x, dimm type: 0x%x\n", ddr_type,
184 		      spd_dimm_type);
185 		/* Make sure it is DDR4 and a registered (RDIMM-type) module. */
186 		if (ddr_type == DDR4_DRAM &&
187 		    (spd_dimm_type == 1 || spd_dimm_type == 5 ||
188 		     spd_dimm_type == 8)) {
189 			/* Yes, we require 100MHz refclk, so set it. */
190 			ddr_ref_hertz = 100000000;
191 			puts("DRAM init: 100 MHz refclk is REQUIRED\n");
192 		}
193 	}
194 
195 	debug("%s: speed: %u\n", __func__, ddr_ref_hertz);
196 	return ddr_ref_hertz;
197 }
198 
199 int encode_row_lsb_ddr3(int row_lsb)
200 {
201 	int row_lsb_start = 14;
202 
203 	/* Decoding for row_lsb        */
204 	/* 000: row_lsb = mem_adr[14]  */
205 	/* 001: row_lsb = mem_adr[15]  */
206 	/* 010: row_lsb = mem_adr[16]  */
207 	/* 011: row_lsb = mem_adr[17]  */
208 	/* 100: row_lsb = mem_adr[18]  */
209 	/* 101: row_lsb = mem_adr[19]  */
210 	/* 110: row_lsb = mem_adr[20]  */
211 	/* 111: RESERVED               */
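	/* e.g. row_lsb = 16 encodes as 16 - 14 = 2 (mem_adr[16]) */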
212 
213 	if (octeon_is_cpuid(OCTEON_CN6XXX) ||
214 	    octeon_is_cpuid(OCTEON_CNF7XXX) || octeon_is_cpuid(OCTEON_CN7XXX))
215 		row_lsb_start = 14;
216 	else
217 		printf("ERROR: Unsupported Octeon model: 0x%x\n",
218 		       read_c0_prid());
219 
220 	return row_lsb - row_lsb_start;
221 }
222 
223 int encode_pbank_lsb_ddr3(int pbank_lsb)
224 {
225 	/* Decoding for pbank_lsb                                        */
226 	/* 0000:DIMM = mem_adr[28]    / rank = mem_adr[27] (if RANK_ENA) */
227 	/* 0001:DIMM = mem_adr[29]    / rank = mem_adr[28]      "        */
228 	/* 0010:DIMM = mem_adr[30]    / rank = mem_adr[29]      "        */
229 	/* 0011:DIMM = mem_adr[31]    / rank = mem_adr[30]      "        */
230 	/* 0100:DIMM = mem_adr[32]    / rank = mem_adr[31]      "        */
231 	/* 0101:DIMM = mem_adr[33]    / rank = mem_adr[32]      "        */
232 	/* 0110:DIMM = mem_adr[34]    / rank = mem_adr[33]      "        */
233 	/* 0111:DIMM = 0              / rank = mem_adr[34]      "        */
234 	/* 1000-1111: RESERVED                                           */
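	/* e.g. pbank_lsb = 30 encodes as 30 - 28 = 2 (DIMM = mem_adr[30]) */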
235 
236 	int pbank_lsb_start = 0;
237 
238 	if (octeon_is_cpuid(OCTEON_CN6XXX) ||
239 	    octeon_is_cpuid(OCTEON_CNF7XXX) || octeon_is_cpuid(OCTEON_CN7XXX))
240 		pbank_lsb_start = 28;
241 	else
242 		printf("ERROR: Unsupported Octeon model: 0x%x\n",
243 		       read_c0_prid());
244 
245 	return pbank_lsb - pbank_lsb_start;
246 }
247 
248 static void set_ddr_clock_initialized(struct ddr_priv *priv, int if_num,
249 				      bool inited_flag)
250 {
251 	priv->ddr_clock_initialized[if_num] = inited_flag;
252 }
253 
254 static int ddr_clock_initialized(struct ddr_priv *priv, int if_num)
255 {
256 	return priv->ddr_clock_initialized[if_num];
257 }
258 
259 static void set_ddr_memory_preserved(struct ddr_priv *priv)
260 {
261 	priv->ddr_memory_preserved = true;
262 }
263 
264 bool ddr_memory_preserved(struct ddr_priv *priv)
265 {
266 	return priv->ddr_memory_preserved;
267 }
268 
269 static void cn78xx_lmc_dreset_init(struct ddr_priv *priv, int if_num)
270 {
271 	union cvmx_lmcx_dll_ctl2 dll_ctl2;
272 
273 	/*
274 	 * The remainder of this section describes the sequence for LMCn.
275 	 *
276 	 * 1. If not done already, write LMC(0..3)_DLL_CTL2 to its reset value
277 	 * (except without changing the LMC(0..3)_DLL_CTL2[INTF_EN] value from
278 	 * that set in the prior Step 3), including
279 	 * LMC(0..3)_DLL_CTL2[DRESET] = 1.
280 	 *
281 	 * 2. Without changing any other LMC(0..3)_DLL_CTL2 fields, write
282 	 * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] = 1.
283 	 */
284 
285 	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
286 	dll_ctl2.cn78xx.dll_bringup = 1;
287 	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);
288 
289 	/*
290 	 * 3. Read LMC(0..3)_DLL_CTL2 and wait for the result.
291 	 */
292 
293 	lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
294 
295 	/*
296 	 * 4. Wait for a minimum of 10 LMC CK cycles.
297 	 */
298 
299 	udelay(1);
300 
301 	/*
302 	 * 5. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
303 	 * LMC(0..3)_DLL_CTL2[QUAD_DLL_ENA] = 1.
304 	 * LMC(0..3)_DLL_CTL2[QUAD_DLL_ENA] must not change after this point
305 	 * without restarting the LMCn DRESET initialization sequence.
306 	 */
307 
308 	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
309 	dll_ctl2.cn78xx.quad_dll_ena = 1;
310 	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);
311 
312 	/*
313 	 * 6. Read LMC(0..3)_DLL_CTL2 and wait for the result.
314 	 */
315 
316 	lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
317 
318 	/*
319 	 * 7. Wait a minimum of 10 us.
320 	 */
321 
322 	udelay(10);
323 
324 	/*
325 	 * 8. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
326 	 * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] = 0.
327 	 * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] must not change after this point
328 	 * without restarting the LMCn DRESET initialization sequence.
329 	 */
330 
331 	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
332 	dll_ctl2.cn78xx.dll_bringup = 0;
333 	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);
334 
335 	/*
336 	 * 9. Read LMC(0..3)_DLL_CTL2 and wait for the result.
337 	 */
338 
339 	lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
340 
341 	/*
342 	 * 10. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
343 	 * LMC(0..3)_DLL_CTL2[DRESET] = 0.
344 	 * LMC(0..3)_DLL_CTL2[DRESET] must not change after this point without
345 	 * restarting the LMCn DRESET initialization sequence.
346 	 *
347 	 * After completing LMCn DRESET initialization, all LMC CSRs may be
348 	 * accessed.  Prior to completing LMC DRESET initialization, only
349 	 * LMC(0..3)_DDR_PLL_CTL, LMC(0..3)_DLL_CTL2, LMC(0..3)_RESET_CTL, and
350 	 * LMC(0..3)_COMP_CTL2 LMC CSRs can be accessed.
351 	 */
352 
353 	dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(if_num));
354 	dll_ctl2.cn78xx.dreset = 0;
355 	lmc_wr(priv, CVMX_LMCX_DLL_CTL2(if_num), dll_ctl2.u64);
356 }
357 
358 int initialize_ddr_clock(struct ddr_priv *priv, struct ddr_conf *ddr_conf,
359 			 u32 cpu_hertz, u32 ddr_hertz, u32 ddr_ref_hertz,
360 			 int if_num, u32 if_mask)
361 {
362 	char *s;
363 
364 	if (ddr_clock_initialized(priv, if_num))
365 		return 0;
366 
367 	if (!ddr_clock_initialized(priv, 0)) {	/* Do this once */
368 		union cvmx_lmcx_reset_ctl reset_ctl;
369 		int i;
370 
371 		/*
372 		 * Check to see if memory is to be preserved and set global
373 		 * flag
374 		 */
375 		for (i = 3; i >= 0; --i) {
376 			if ((if_mask & (1 << i)) == 0)
377 				continue;
378 
379 			reset_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
380 			if (reset_ctl.s.ddr3psv == 1) {
381 				debug("LMC%d Preserving memory\n", i);
382 				set_ddr_memory_preserved(priv);
383 
384 				/* Re-initialize flags */
385 				reset_ctl.s.ddr3pwarm = 0;
386 				reset_ctl.s.ddr3psoft = 0;
387 				reset_ctl.s.ddr3psv = 0;
388 				lmc_wr(priv, CVMX_LMCX_RESET_CTL(i),
389 				       reset_ctl.u64);
390 			}
391 		}
392 	}
393 
394 	/*
395 	 * ToDo: Add support for these SoCs:
396 	 *
397 	 * if (octeon_is_cpuid(OCTEON_CN63XX) ||
398 	 * octeon_is_cpuid(OCTEON_CN66XX) ||
399 	 * octeon_is_cpuid(OCTEON_CN61XX) || octeon_is_cpuid(OCTEON_CNF71XX))
400 	 *
401 	 * and
402 	 *
403 	 * if (octeon_is_cpuid(OCTEON_CN68XX))
404 	 *
405 	 * and
406 	 *
407 	 * if (octeon_is_cpuid(OCTEON_CN70XX))
408 	 *
409 	 */
410 
411 	if (octeon_is_cpuid(OCTEON_CN78XX) || octeon_is_cpuid(OCTEON_CN73XX) ||
412 	    octeon_is_cpuid(OCTEON_CNF75XX)) {
413 		union cvmx_lmcx_dll_ctl2 dll_ctl2;
414 		union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
415 		union cvmx_lmcx_ddr_pll_ctl ddr_pll_ctl;
416 		struct dimm_config *dimm_config_table =
417 			ddr_conf->dimm_config_table;
418 		int en_idx, save_en_idx, best_en_idx = 0;
419 		u64 clkf, clkr, max_clkf = 127;
420 		u64 best_clkf = 0, best_clkr = 0;
421 		u64 best_pll_MHz = 0;
422 		u64 pll_MHz;
423 		u64 min_pll_MHz = 800;
424 		u64 max_pll_MHz = 5000;
425 		u64 error;
426 		u64 best_error;
427 		u64 best_calculated_ddr_hertz = 0;
428 		u64 calculated_ddr_hertz = 0;
429 		u64 orig_ddr_hertz = ddr_hertz;
430 		const int _en[] = { 1, 2, 3, 4, 5, 6, 7, 8, 10, 12 };
431 		int override_pll_settings;
432 		int new_bwadj;
433 		int ddr_type;
434 		int i;
435 
436 		/* ddr_type only indicates DDR4 or DDR3 */
437 		ddr_type = (read_spd(&dimm_config_table[0], 0,
438 				     DDR4_SPD_KEY_BYTE_DEVICE_TYPE) ==
439 			    0x0C) ? DDR4_DRAM : DDR3_DRAM;
440 
441 		/*
442 		 * 5.9 LMC Initialization Sequence
443 		 *
444 		 * There are 13 parts to the LMC initialization procedure:
445 		 *
446 		 * 1. DDR PLL initialization
447 		 *
448 		 * 2. LMC CK initialization
449 		 *
450 		 * 3. LMC interface enable initialization
451 		 *
452 		 * 4. LMC DRESET initialization
453 		 *
454 		 * 5. LMC CK local initialization
455 		 *
456 		 * 6. LMC RESET initialization
457 		 *
458 		 * 7. Early LMC initialization
459 		 *
460 		 * 8. LMC offset training
461 		 *
462 		 * 9. LMC internal Vref training
463 		 *
464 		 * 10. LMC deskew training
465 		 *
466 		 * 11. LMC write leveling
467 		 *
468 		 * 12. LMC read leveling
469 		 *
470 		 * 13. Final LMC initialization
471 		 *
472 		 * CN78XX supports two modes:
473 		 *
474 		 * - two-LMC mode: both LMCs 2/3 must not be enabled
475 		 * (LMC2/3_DLL_CTL2[DRESET] must be set to 1 and
476 		 * LMC2/3_DLL_CTL2[INTF_EN]
477 		 * must be set to 0) and both LMCs 0/1 must be enabled.
478 		 *
479 		 * - four-LMC mode: all four LMCs 0..3 must be enabled.
480 		 *
481 		 * Steps 4 and 6..13 should each be performed for each
482 		 * enabled LMC (either twice or four times). Steps 1..3 and
483 		 * 5 are more global in nature and each must be executed
484 		 * exactly once (not once per LMC) each time the DDR PLL
485 		 * changes or is first brought up. Steps 1..3 and 5 need
486 		 * not be performed if the DDR PLL is stable.
487 		 *
488 		 * Generally, the steps are performed in order. The exception
489 		 * is that the CK local initialization (step 5) must be
490 		 * performed after some DRESET initializations (step 4) and
491 		 * before other DRESET initializations when the DDR PLL is
492 		 * brought up or changed. (The CK local initialization uses
493 		 * information from some LMCs to bring up the other local
494 		 * CKs.) The following text describes these ordering
495 		 * requirements in more detail.
496 		 *
497 		 * Following any chip reset, the DDR PLL must be brought up,
498 		 * and all 13 steps should be executed. Subsequently, it is
499 		 * possible to execute only steps 4 and 6..13, or to execute
500 		 * only steps 8..13.
501 		 *
502 		 * The remainder of this section covers these initialization
503 		 * steps in sequence.
504 		 */
505 
506 		/* Do the following init only once */
507 		if (if_num != 0)
508 			goto not_if0;
509 
510 		/* Only for interface #0 ... */
511 
512 		/*
513 		 * 5.9.3 LMC Interface-Enable Initialization
514 		 *
515 		 * LMC interface-enable initialization (Step 3) must be
516 		 * performed after Step 2 for each chip reset and whenever
517 		 * the DDR clock speed changes. This step needs to be
518 		 * performed only once, not once per LMC. Perform the
519 		 * following three substeps for the LMC interface-enable
520 		 * initialization:
521 		 *
522 		 * 1. Without changing any other LMC2_DLL_CTL2 fields
523 		 * (LMC(0..3)_DLL_CTL2 should be at their reset values after
524 		 * Step 1), write LMC2_DLL_CTL2[INTF_EN] = 1 if four-LMC
525 		 * mode is desired.
526 		 *
527 		 * 2. Without changing any other LMC3_DLL_CTL2 fields, write
528 		 * LMC3_DLL_CTL2[INTF_EN] = 1 if four-LMC mode is desired.
529 		 *
530 		 * 3. Read LMC2_DLL_CTL2 and wait for the result.
531 		 *
532 		 * The LMC2_DLL_CTL2[INTF_EN] and LMC3_DLL_CTL2[INTF_EN]
533 		 * values should not be changed by software from this point.
534 		 */
535 
536 		for (i = 0; i < 4; ++i) {
537 			if ((if_mask & (1 << i)) == 0)
538 				continue;
539 
540 			dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
541 
542 			dll_ctl2.cn78xx.byp_setting = 0;
543 			dll_ctl2.cn78xx.byp_sel = 0;
544 			dll_ctl2.cn78xx.quad_dll_ena = 0;
545 			dll_ctl2.cn78xx.dreset = 1;
546 			dll_ctl2.cn78xx.dll_bringup = 0;
547 			dll_ctl2.cn78xx.intf_en = 0;
548 
549 			lmc_wr(priv, CVMX_LMCX_DLL_CTL2(i), dll_ctl2.u64);
550 		}
551 
552 		/*
553 		 * ###### Interface enable (intf_en) deferred until after
554 		 * DDR_DIV_RESET=0 #######
555 		 */
556 
557 		/*
558 		 * 5.9.1 DDR PLL Initialization
559 		 *
560 		 * DDR PLL initialization (Step 1) must be performed for each
561 		 * chip reset and whenever the DDR clock speed changes. This
562 		 * step needs to be performed only once, not once per LMC.
563 		 *
564 		 * Perform the following eight substeps to initialize the
565 		 * DDR PLL:
566 		 *
567 		 * 1. If not done already, write all fields in
568 		 * LMC(0..3)_DDR_PLL_CTL and
569 		 * LMC(0..1)_DLL_CTL2 to their reset values, including:
570 		 *
571 		 * .. LMC0_DDR_PLL_CTL[DDR_DIV_RESET] = 1
572 		 * .. LMC0_DLL_CTL2[DRESET] = 1
573 		 *
574 		 * This substep is not necessary after a chip reset.
575 		 *
576 		 */
577 
578 		ddr_pll_ctl.u64 = lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(0));
579 
580 		ddr_pll_ctl.cn78xx.reset_n = 0;
581 		ddr_pll_ctl.cn78xx.ddr_div_reset = 1;
582 		ddr_pll_ctl.cn78xx.phy_dcok = 0;
583 
584 		/*
585 		 * 73XX pass 1.3 has LMC0 DCLK_INVERT tied to 1; earlier
586 		 * 73xx passes have it tied to 0
587 		 *
588 		 * 75XX needs LMC0 DCLK_INVERT set to 1 to minimize duty
589 		 * cycle falling points
590 		 *
591 		 * and on all other chips we default LMC0 DCLK_INVERT to 0
592 		 */
593 		ddr_pll_ctl.cn78xx.dclk_invert =
594 		    !!(octeon_is_cpuid(OCTEON_CN73XX_PASS1_3) ||
595 		       octeon_is_cpuid(OCTEON_CNF75XX));
596 
597 		/*
598 		 * allow override of LMC0 desired setting for DCLK_INVERT,
599 		 * but not on 73XX;
600 		 * we cannot change LMC0 DCLK_INVERT on 73XX any pass
601 		 */
602 		if (!(octeon_is_cpuid(OCTEON_CN73XX))) {
603 			s = lookup_env(priv, "ddr0_set_dclk_invert");
604 			if (s) {
605 				ddr_pll_ctl.cn78xx.dclk_invert =
606 				    !!simple_strtoul(s, NULL, 0);
607 				debug("LMC0: override DDR_PLL_CTL[dclk_invert] to %d\n",
608 				      ddr_pll_ctl.cn78xx.dclk_invert);
609 			}
610 		}
611 
612 		lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0), ddr_pll_ctl.u64);
613 		debug("%-45s : 0x%016llx\n", "LMC0: DDR_PLL_CTL",
614 		      ddr_pll_ctl.u64);
615 
616 		// only when LMC1 is active
617 		if (if_mask & 0x2) {
618 			/*
619 			 * For CNF75XX, both LMC0 and LMC1 use the same PLL,
620 			 * so we use the LMC0 setting of DCLK_INVERT for LMC1.
621 			 */
622 			if (!octeon_is_cpuid(OCTEON_CNF75XX)) {
623 				int override = 0;
624 
625 				/*
626 				 * by default, for non-CNF75XX, we want
627 				 * LMC1 toggled (inverted) from LMC0
628 				 */
629 				int lmc0_dclk_invert =
630 				    ddr_pll_ctl.cn78xx.dclk_invert;
631 
632 				/*
633 				 * FIXME: the work-around for DDR3 UDIMM problems
634 				 * is to use the LMC0 setting on LMC1; also, on
635 				 * 73xx pass 1.3 we want to default LMC1
636 				 * DCLK_INVERT to the LMC0 value, not its inverse
637 				 */
638 				int lmc1_dclk_invert;
639 
640 				lmc1_dclk_invert =
641 					((ddr_type == DDR4_DRAM) &&
642 					 !octeon_is_cpuid(OCTEON_CN73XX_PASS1_3))
643 					? lmc0_dclk_invert ^ 1 :
644 					lmc0_dclk_invert;
645 
646 				/*
647 				 * allow override of LMC1 desired setting for
648 				 * DCLK_INVERT
649 				 */
650 				s = lookup_env(priv, "ddr1_set_dclk_invert");
651 				if (s) {
652 					lmc1_dclk_invert =
653 						!!simple_strtoul(s, NULL, 0);
654 					override = 1;
655 				}
656 				debug("LMC1: %s DDR_PLL_CTL[dclk_invert] to %d (LMC0 %d)\n",
657 				      (override) ? "override" :
658 				      "default", lmc1_dclk_invert,
659 				      lmc0_dclk_invert);
660 
661 				ddr_pll_ctl.cn78xx.dclk_invert =
662 					lmc1_dclk_invert;
663 			}
664 
665 			// but always write LMC1 CSR if it is active
666 			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(1), ddr_pll_ctl.u64);
667 			debug("%-45s : 0x%016llx\n",
668 			      "LMC1: DDR_PLL_CTL", ddr_pll_ctl.u64);
669 		}
670 
671 		/*
672 		 * 2. If the current DRAM contents are not preserved (see
673 		 * LMC(0..3)_RESET_CTL[DDR3PSV]), this is also an appropriate
674 		 * time to assert the RESET# pin of the DDR3/DDR4 DRAM parts.
675 		 * If desired, write
676 		 * LMC0_RESET_CTL[DDR3RST] = 0 without modifying any other
677 		 * LMC0_RESET_CTL fields to assert the DDR_RESET_L pin.
678 		 * No action is required here to assert DDR_RESET_L
679 		 * following a chip reset. Refer to Section 5.9.6. Do this
680 		 * for all enabled LMCs.
681 		 */
682 
683 		for (i = 0; (!ddr_memory_preserved(priv)) && i < 4; ++i) {
684 			union cvmx_lmcx_reset_ctl reset_ctl;
685 
686 			if ((if_mask & (1 << i)) == 0)
687 				continue;
688 
689 			reset_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
690 			reset_ctl.cn78xx.ddr3rst = 0;	/* Reset asserted */
691 			debug("LMC%d Asserting DDR_RESET_L\n", i);
692 			lmc_wr(priv, CVMX_LMCX_RESET_CTL(i), reset_ctl.u64);
693 			lmc_rd(priv, CVMX_LMCX_RESET_CTL(i));
694 		}
695 
696 		/*
697 		 * 3. Without changing any other LMC0_DDR_PLL_CTL values,
698 		 * write LMC0_DDR_PLL_CTL[CLKF] with a value that gives a
699 		 * desired DDR PLL speed. The LMC0_DDR_PLL_CTL[CLKF] value
700 		 * should be selected in conjunction with the post-scalar
701 		 * divider values for LMC (LMC0_DDR_PLL_CTL[DDR_PS_EN]) so
702 		 * that the desired LMC CK speed is produced (all
703 		 * enabled LMCs must run the same speed). Section 5.14
704 		 * describes LMC0_DDR_PLL_CTL[CLKF] and
705 		 * LMC0_DDR_PLL_CTL[DDR_PS_EN] programmings that produce
706 		 * the desired LMC CK speed. Section 5.9.2 describes LMC CK
707 		 * initialization, which can be done separately from the DDR
708 		 * PLL initialization described in this section.
709 		 *
710 		 * The LMC0_DDR_PLL_CTL[CLKF] value must not change after
711 		 * this point without restarting this SDRAM PLL
712 		 * initialization sequence.
713 		 */
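
		/*
		 * The search below evaluates every CLKR/EN combination using:
		 *
		 *   PLL MHz = ddr_ref_hertz * (CLKF + 1) / (CLKR + 1) / 1000000
		 *   LMC CK  = ddr_ref_hertz * (CLKF + 1) / ((CLKR + 1) * EN)
		 *
		 * For example, with the 50 MHz reference and an 800 MHz
		 * (DDR3-1600) target, CLKR = 0 and EN = 4 give CLKF = 63,
		 * a 3200 MHz PLL and an exact 800 MHz LMC CK (error = 0).
		 */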
714 
715 		/* Init to max error */
716 		error = ddr_hertz;
717 		best_error = ddr_hertz;
718 
719 		debug("DDR Reference Hertz = %d\n", ddr_ref_hertz);
720 
721 		while (best_error == ddr_hertz) {
722 			for (clkr = 0; clkr < 4; ++clkr) {
723 				for (en_idx =
724 				     sizeof(_en) / sizeof(int) -
725 				     1; en_idx >= 0; --en_idx) {
726 					save_en_idx = en_idx;
727 					clkf =
728 					    ((ddr_hertz) *
729 					     (clkr + 1) * (_en[save_en_idx]));
730 					clkf = divide_nint(clkf, ddr_ref_hertz)
731 					    - 1;
732 					pll_MHz =
733 					    ddr_ref_hertz *
734 					    (clkf + 1) / (clkr + 1) / 1000000;
735 					calculated_ddr_hertz =
736 					    ddr_ref_hertz *
737 					    (clkf +
738 					     1) / ((clkr +
739 						    1) * (_en[save_en_idx]));
740 					error =
741 					    ddr_hertz - calculated_ddr_hertz;
742 
743 					if (pll_MHz < min_pll_MHz ||
744 					    pll_MHz > max_pll_MHz)
745 						continue;
746 					if (clkf > max_clkf) {
747 						/*
748 						 * PLL requires clkf to be
749 						 * limited
750 						 */
751 						continue;
752 					}
753 					if (abs(error) > abs(best_error))
754 						continue;
755 
756 					debug("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld\n",
757 					      clkr, save_en_idx,
758 					      _en[save_en_idx], clkf, pll_MHz,
759 					     calculated_ddr_hertz, error);
760 
761 					/* Favor the highest PLL frequency. */
762 					if (abs(error) < abs(best_error) ||
763 					    pll_MHz > best_pll_MHz) {
764 						best_pll_MHz = pll_MHz;
765 						best_calculated_ddr_hertz =
766 							calculated_ddr_hertz;
767 						best_error = error;
768 						best_clkr = clkr;
769 						best_clkf = clkf;
770 						best_en_idx = save_en_idx;
771 					}
772 				}
773 			}
774 
775 			override_pll_settings = 0;
776 
777 			s = lookup_env(priv, "ddr_pll_clkr");
778 			if (s) {
779 				best_clkr = simple_strtoul(s, NULL, 0);
780 				override_pll_settings = 1;
781 			}
782 
783 			s = lookup_env(priv, "ddr_pll_clkf");
784 			if (s) {
785 				best_clkf = simple_strtoul(s, NULL, 0);
786 				override_pll_settings = 1;
787 			}
788 
789 			s = lookup_env(priv, "ddr_pll_en_idx");
790 			if (s) {
791 				best_en_idx = simple_strtoul(s, NULL, 0);
792 				override_pll_settings = 1;
793 			}
794 
795 			if (override_pll_settings) {
796 				best_pll_MHz =
797 				    ddr_ref_hertz * (best_clkf +
798 						     1) /
799 				    (best_clkr + 1) / 1000000;
800 				best_calculated_ddr_hertz =
801 				    ddr_ref_hertz * (best_clkf +
802 						     1) /
803 				    ((best_clkr + 1) * (_en[best_en_idx]));
804 				best_error =
805 				    ddr_hertz - best_calculated_ddr_hertz;
806 			}
807 
808 			debug("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld <==\n",
809 			      best_clkr, best_en_idx, _en[best_en_idx],
810 			      best_clkf, best_pll_MHz,
811 			      best_calculated_ddr_hertz, best_error);
812 
813 			/*
814 			 * Try lowering the frequency if we can't get a
815 			 * working configuration
816 			 */
817 			if (best_error == ddr_hertz) {
818 				if (ddr_hertz < orig_ddr_hertz - 10000000)
819 					break;
820 				ddr_hertz -= 1000000;
821 				best_error = ddr_hertz;
822 			}
823 		}
824 
825 		if (best_error == ddr_hertz) {
826 			printf("ERROR: Can not compute a legal DDR clock speed configuration.\n");
827 			return -1;
828 		}
829 
830 		new_bwadj = (best_clkf + 1) / 10;
831 		debug("bwadj: %2d\n", new_bwadj);
832 
833 		s = lookup_env(priv, "ddr_pll_bwadj");
834 		if (s) {
835 			new_bwadj = simple_strtoul(s, NULL, 0);
836 			debug("bwadj: %2d\n", new_bwadj);
837 		}
838 
839 		for (i = 0; i < 2; ++i) {
840 			if ((if_mask & (1 << i)) == 0)
841 				continue;
842 
843 			ddr_pll_ctl.u64 =
844 			    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
845 			debug("LMC%d: DDR_PLL_CTL                             : 0x%016llx\n",
846 			      i, ddr_pll_ctl.u64);
847 
848 			ddr_pll_ctl.cn78xx.ddr_ps_en = best_en_idx;
849 			ddr_pll_ctl.cn78xx.clkf = best_clkf;
850 			ddr_pll_ctl.cn78xx.clkr = best_clkr;
851 			ddr_pll_ctl.cn78xx.reset_n = 0;
852 			ddr_pll_ctl.cn78xx.bwadj = new_bwadj;
853 
854 			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
855 			debug("LMC%d: DDR_PLL_CTL                             : 0x%016llx\n",
856 			      i, ddr_pll_ctl.u64);
857 
858 			/*
859 			 * For cnf75xx LMC0 and LMC1 use the same PLL so
860 			 * only program LMC0 PLL.
861 			 */
862 			if (octeon_is_cpuid(OCTEON_CNF75XX))
863 				break;
864 		}
865 
866 		for (i = 0; i < 4; ++i) {
867 			if ((if_mask & (1 << i)) == 0)
868 				continue;
869 
870 			/*
871 			 * 4. Read LMC0_DDR_PLL_CTL and wait for the result.
872 			 */
873 
874 			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
875 
876 			/*
877 			 * 5. Wait a minimum of 3 us.
878 			 */
879 
880 			udelay(3);	/* Wait 3 us */
881 
882 			/*
883 			 * 6. Write LMC0_DDR_PLL_CTL[RESET_N] = 1 without
884 			 * changing any other LMC0_DDR_PLL_CTL values.
885 			 */
886 
887 			ddr_pll_ctl.u64 =
888 			    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
889 			ddr_pll_ctl.cn78xx.reset_n = 1;
890 			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
891 
892 			/*
893 			 * 7. Read LMC0_DDR_PLL_CTL and wait for the result.
894 			 */
895 
896 			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
897 
898 			/*
899 			 * 8. Wait a minimum of 25 us.
900 			 */
901 
902 			udelay(25);	/* Wait 25 us */
903 
904 			/*
905 			 * For cnf75xx LMC0 and LMC1 use the same PLL so
906 			 * only program LMC0 PLL.
907 			 */
908 			if (octeon_is_cpuid(OCTEON_CNF75XX))
909 				break;
910 		}
911 
912 		for (i = 0; i < 4; ++i) {
913 			if ((if_mask & (1 << i)) == 0)
914 				continue;
915 
916 			/*
917 			 * 5.9.2 LMC CK Initialization
918 			 *
919 			 * DDR PLL initialization must be completed prior to
920 			 * starting LMC CK initialization.
921 			 *
922 			 * Perform the following substeps to initialize the
923 			 * LMC CK:
924 			 *
925 			 * 1. Without changing any other LMC(0..3)_DDR_PLL_CTL
926 			 * values, write
927 			 * LMC(0..3)_DDR_PLL_CTL[DDR_DIV_RESET] = 1 and
928 			 * LMC(0..3)_DDR_PLL_CTL[DDR_PS_EN] with the
929 			 * appropriate value to get the desired LMC CK speed.
930 			 * Section 5.14 discusses CLKF and DDR_PS_EN
931 			 * programmings.  The LMC(0..3)_DDR_PLL_CTL[DDR_PS_EN]
932 			 * must not change after this point without restarting
933 			 * this LMC CK initialization sequence.
934 			 */
935 
936 			ddr_pll_ctl.u64 = lmc_rd(priv,
937 						 CVMX_LMCX_DDR_PLL_CTL(i));
938 			ddr_pll_ctl.cn78xx.ddr_div_reset = 1;
939 			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
940 
941 			/*
942 			 * 2. Without changing any other fields in
943 			 * LMC(0..3)_DDR_PLL_CTL, write
944 			 * LMC(0..3)_DDR_PLL_CTL[DDR4_MODE] = 0.
945 			 */
946 
947 			ddr_pll_ctl.u64 =
948 			    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
949 			ddr_pll_ctl.cn78xx.ddr4_mode =
950 			    (ddr_type == DDR4_DRAM) ? 1 : 0;
951 			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
952 
953 			/*
954 			 * 3. Read LMC(0..3)_DDR_PLL_CTL and wait for the
955 			 * result.
956 			 */
957 
958 			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
959 
960 			/*
961 			 * 4. Wait a minimum of 1 us.
962 			 */
963 
964 			udelay(1);	/* Wait 1 us */
965 
966 			/*
967 			 * ###### Steps 5 through 7 deferred until after
968 			 * DDR_DIV_RESET=0 #######
969 			 */
970 
971 			/*
972 			 * 8. Without changing any other LMC(0..3)_COMP_CTL2
973 			 * values, write
974 			 * LMC(0..3)_COMP_CTL2[CK_CTL,CONTROL_CTL,CMD_CTL]
975 			 * to the desired DDR*_CK_*_P control and command
976 			 * signals drive strength.
977 			 */
978 
979 			union cvmx_lmcx_comp_ctl2 comp_ctl2;
980 			const struct ddr3_custom_config *custom_lmc_config =
981 			    &ddr_conf->custom_lmc_config;
982 
983 			comp_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_COMP_CTL2(i));
984 
985 			/* Default 4=34.3 ohm */
986 			comp_ctl2.cn78xx.dqx_ctl =
987 			    (custom_lmc_config->dqx_ctl ==
988 			     0) ? 4 : custom_lmc_config->dqx_ctl;
989 			/* Default 4=34.3 ohm */
990 			comp_ctl2.cn78xx.ck_ctl =
991 			    (custom_lmc_config->ck_ctl ==
992 			     0) ? 4 : custom_lmc_config->ck_ctl;
993 			/* Default 4=34.3 ohm */
994 			comp_ctl2.cn78xx.cmd_ctl =
995 			    (custom_lmc_config->cmd_ctl ==
996 			     0) ? 4 : custom_lmc_config->cmd_ctl;
997 
998 			comp_ctl2.cn78xx.rodt_ctl = 0x4;	/* 60 ohm */
999 
1000 			comp_ctl2.cn70xx.ptune_offset =
1001 			    (abs(custom_lmc_config->ptune_offset) & 0x7)
1002 			    | (_sign(custom_lmc_config->ptune_offset) << 3);
1003 			comp_ctl2.cn70xx.ntune_offset =
1004 			    (abs(custom_lmc_config->ntune_offset) & 0x7)
1005 			    | (_sign(custom_lmc_config->ntune_offset) << 3);
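
			/*
			 * The ptune/ntune offsets are encoded sign-magnitude:
			 * bits [2:0] hold the magnitude and bit 3 the sign
			 * (1 = negative), which is what the abs()/_sign()
			 * combination above constructs.
			 */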
1006 
1007 			s = lookup_env(priv, "ddr_clk_ctl");
1008 			if (s) {
1009 				comp_ctl2.cn78xx.ck_ctl =
1010 				    simple_strtoul(s, NULL, 0);
1011 			}
1012 
1013 			s = lookup_env(priv, "ddr_ck_ctl");
1014 			if (s) {
1015 				comp_ctl2.cn78xx.ck_ctl =
1016 				    simple_strtoul(s, NULL, 0);
1017 			}
1018 
1019 			s = lookup_env(priv, "ddr_cmd_ctl");
1020 			if (s) {
1021 				comp_ctl2.cn78xx.cmd_ctl =
1022 				    simple_strtoul(s, NULL, 0);
1023 			}
1024 
1025 			s = lookup_env(priv, "ddr_dqx_ctl");
1026 			if (s) {
1027 				comp_ctl2.cn78xx.dqx_ctl =
1028 				    simple_strtoul(s, NULL, 0);
1029 			}
1030 
1031 			s = lookup_env(priv, "ddr_ptune_offset");
1032 			if (s) {
1033 				comp_ctl2.cn78xx.ptune_offset =
1034 				    simple_strtoul(s, NULL, 0);
1035 			}
1036 
1037 			s = lookup_env(priv, "ddr_ntune_offset");
1038 			if (s) {
1039 				comp_ctl2.cn78xx.ntune_offset =
1040 				    simple_strtoul(s, NULL, 0);
1041 			}
1042 
1043 			lmc_wr(priv, CVMX_LMCX_COMP_CTL2(i), comp_ctl2.u64);
1044 
1045 			/*
1046 			 * 9. Read LMC(0..3)_DDR_PLL_CTL and wait for the
1047 			 * result.
1048 			 */
1049 
1050 			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
1051 
1052 			/*
1053 			 * 10. Wait a minimum of 200 ns.
1054 			 */
1055 
1056 			udelay(1);	/* Wait 1 us */
1057 
1058 			/*
1059 			 * 11. Without changing any other
1060 			 * LMC(0..3)_DDR_PLL_CTL values, write
1061 			 * LMC(0..3)_DDR_PLL_CTL[DDR_DIV_RESET] = 0.
1062 			 */
1063 
1064 			ddr_pll_ctl.u64 = lmc_rd(priv,
1065 						 CVMX_LMCX_DDR_PLL_CTL(i));
1066 			ddr_pll_ctl.cn78xx.ddr_div_reset = 0;
1067 			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
1068 
1069 			/*
1070 			 * 12. Read LMC(0..3)_DDR_PLL_CTL and wait for the
1071 			 * result.
1072 			 */
1073 
1074 			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
1075 
1076 			/*
1077 			 * 13. Wait a minimum of 200 ns.
1078 			 */
1079 
1080 			udelay(1);	/* Wait 1 us */
1081 		}
1082 
1083 		/*
1084 		 * Relocated Interface Enable (intf_en) Step
1085 		 */
1086 		for (i = (octeon_is_cpuid(OCTEON_CN73XX) ||
1087 			  octeon_is_cpuid(OCTEON_CNF75XX)) ? 1 : 2;
1088 		     i < 4; ++i) {
1089 			/*
1090 			 * This step is only necessary for LMC 2 and 3 in
1091 			 * 4-LMC mode. The mask will cause the unpopulated
1092 			 * interfaces to be skipped.
1093 			 */
1094 			if ((if_mask & (1 << i)) == 0)
1095 				continue;
1096 
1097 			dll_ctl2.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
1098 			dll_ctl2.cn78xx.intf_en = 1;
1099 			lmc_wr(priv, CVMX_LMCX_DLL_CTL2(i), dll_ctl2.u64);
1100 			lmc_rd(priv, CVMX_LMCX_DLL_CTL2(i));
1101 		}
1102 
1103 		/*
1104 		 * Relocated PHY_DCOK Step
1105 		 */
1106 		for (i = 0; i < 4; ++i) {
1107 			if ((if_mask & (1 << i)) == 0)
1108 				continue;
1109 			/*
1110 			 * 5. Without changing any other fields in
1111 			 * LMC(0..3)_DDR_PLL_CTL, write
1112 			 * LMC(0..3)_DDR_PLL_CTL[PHY_DCOK] = 1.
1113 			 */
1114 
1115 			ddr_pll_ctl.u64 = lmc_rd(priv,
1116 						 CVMX_LMCX_DDR_PLL_CTL(i));
1117 			ddr_pll_ctl.cn78xx.phy_dcok = 1;
1118 			lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(i), ddr_pll_ctl.u64);
1119 			/*
1120 			 * 6. Read LMC(0..3)_DDR_PLL_CTL and wait for
1121 			 * the result.
1122 			 */
1123 
1124 			lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(i));
1125 
1126 			/*
1127 			 * 7. Wait a minimum of 20 us.
1128 			 */
1129 
1130 			udelay(20);	/* Wait 20 us */
1131 		}
1132 
1133 		/*
1134 		 * 5.9.4 LMC DRESET Initialization
1135 		 *
1136 		 * All of the DDR PLL, LMC global CK, and LMC interface
1137 		 * enable initializations must be completed prior to starting
1138 		 * this LMC DRESET initialization (Step 4).
1139 		 *
1140 		 * This LMC DRESET step is done for all enabled LMCs.
1141 		 *
1142 		 * There are special constraints on the ordering of DRESET
1143 		 * initialization (Steps 4) and CK local initialization
1144 		 * (Step 5) whenever CK local initialization must be executed.
1145 		 * CK local initialization must be executed whenever the DDR
1146 		 * PLL is being brought up (for each chip reset and whenever
1147 		 * the DDR clock speed changes).
1148 		 *
1149 		 * When Step 5 must be executed in the two-LMC mode case:
1150 		 * - LMC0 DRESET initialization must occur before Step 5.
1151 		 * - LMC1 DRESET initialization must occur after Step 5.
1152 		 *
1153 		 * When Step 5 must be executed in the four-LMC mode case:
1154 		 * - LMC2 and LMC3 DRESET initialization must occur before
1155 		 *   Step 5.
1156 		 * - LMC0 and LMC1 DRESET initialization must occur after
1157 		 *   Step 5.
1158 		 */
1159 
1160 		if (octeon_is_cpuid(OCTEON_CN73XX)) {
1161 			/* ONE-LMC or TWO-LMC MODE BEFORE STEP 5 for cn73xx */
1162 			cn78xx_lmc_dreset_init(priv, 0);
1163 		} else if (octeon_is_cpuid(OCTEON_CNF75XX)) {
1164 			if (if_mask == 0x3) {
1165 				/*
1166 				 * 2-LMC Mode: LMC1 DRESET must occur
1167 				 * before Step 5
1168 				 */
1169 				cn78xx_lmc_dreset_init(priv, 1);
1170 			}
1171 		} else {
1172 			/* TWO-LMC MODE DRESET BEFORE STEP 5 */
1173 			if (if_mask == 0x3)
1174 				cn78xx_lmc_dreset_init(priv, 0);
1175 
1176 			/* FOUR-LMC MODE BEFORE STEP 5 */
1177 			if (if_mask == 0xf) {
1178 				cn78xx_lmc_dreset_init(priv, 2);
1179 				cn78xx_lmc_dreset_init(priv, 3);
1180 			}
1181 		}
1182 
1183 		/*
1184 		 * 5.9.5 LMC CK Local Initialization
1185 		 *
1186 		 * All of DDR PLL, LMC global CK, and LMC interface-enable
1187 		 * initializations must be completed prior to starting this
1188 		 * LMC CK local initialization (Step 5).
1189 		 *
1190 		 * LMC CK Local initialization must be performed for each
1191 		 * chip reset and whenever the DDR clock speed changes. This
1192 		 * step needs to be performed only once, not once per LMC.
1193 		 *
1194 		 * There are special constraints on the ordering of DRESET
1195 		 * initialization (Steps 4) and CK local initialization
1196 		 * (Step 5) whenever CK local initialization must be executed.
1197 		 * CK local initialization must be executed whenever the
1198 		 * DDR PLL is being brought up (for each chip reset and
1199 		 * whenever the DDR clock speed changes).
1200 		 *
1201 		 * When Step 5 must be executed in the two-LMC mode case:
1202 		 * - LMC0 DRESET initialization must occur before Step 5.
1203 		 * - LMC1 DRESET initialization must occur after Step 5.
1204 		 *
1205 		 * When Step 5 must be executed in the four-LMC mode case:
1206 		 * - LMC2 and LMC3 DRESET initialization must occur before
1207 		 *   Step 5.
1208 		 * - LMC0 and LMC1 DRESET initialization must occur after
1209 		 *   Step 5.
1210 		 *
1211 		 * LMC CK local initialization is different depending on
1212 		 * whether two-LMC or four-LMC modes are desired.
1213 		 */
1214 
1215 		if (if_mask == 0x3) {
1216 			int temp_lmc_if_num = octeon_is_cpuid(OCTEON_CNF75XX) ?
1217 				1 : 0;
1218 
1219 			/*
1220 			 * 5.9.5.1 LMC CK Local Initialization for Two-LMC
1221 			 * Mode
1222 			 *
1223 			 * 1. Write LMC0_DLL_CTL3 to its reset value. (Note
1224 			 * that LMC0_DLL_CTL3[DLL90_BYTE_SEL] = 0x2 .. 0x8
1225 			 * should also work.)
1226 			 */
1227 
1228 			ddr_dll_ctl3.u64 = 0;
1229 			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
1230 
1231 			if (octeon_is_cpuid(OCTEON_CNF75XX))
1232 				ddr_dll_ctl3.cn78xx.dll90_byte_sel = 7;
1233 			else
1234 				ddr_dll_ctl3.cn78xx.dll90_byte_sel = 1;
1235 
1236 			lmc_wr(priv,
1237 			       CVMX_LMCX_DLL_CTL3(temp_lmc_if_num),
1238 			       ddr_dll_ctl3.u64);
1239 
1240 			/*
1241 			 * 2. Read LMC0_DLL_CTL3 and wait for the result.
1242 			 */
1243 
1244 			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(temp_lmc_if_num));
1245 
1246 			/*
1247 			 * 3. Without changing any other fields in
1248 			 * LMC0_DLL_CTL3, write
1249 			 * LMC0_DLL_CTL3[DCLK90_FWD] = 1.  Writing
1250 			 * LMC0_DLL_CTL3[DCLK90_FWD] = 1
1251 			 * causes clock-delay information to be forwarded
1252 			 * from LMC0 to LMC1.
1253 			 */
1254 
1255 			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
1256 			lmc_wr(priv,
1257 			       CVMX_LMCX_DLL_CTL3(temp_lmc_if_num),
1258 			       ddr_dll_ctl3.u64);
1259 
1260 			/*
1261 			 * 4. Read LMC0_DLL_CTL3 and wait for the result.
1262 			 */
1263 
1264 			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(temp_lmc_if_num));
1265 		}
1266 
1267 		if (if_mask == 0xf) {
1268 			/*
1269 			 * 5.9.5.2 LMC CK Local Initialization for Four-LMC
1270 			 * Mode
1271 			 *
1272 			 * 1. Write LMC2_DLL_CTL3 to its reset value except
1273 			 * LMC2_DLL_CTL3[DLL90_BYTE_SEL] = 0x7.
1274 			 */
1275 
1276 			ddr_dll_ctl3.u64 = 0;
1277 			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
1278 			ddr_dll_ctl3.cn78xx.dll90_byte_sel = 7;
1279 			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u64);
1280 
1281 			/*
1282 			 * 2. Write LMC3_DLL_CTL3 to its reset value except
1283 			 * LMC3_DLL_CTL3[DLL90_BYTE_SEL] = 0x2.
1284 			 */
1285 
1286 			ddr_dll_ctl3.u64 = 0;
1287 			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
1288 			ddr_dll_ctl3.cn78xx.dll90_byte_sel = 2;
1289 			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u64);
1290 
1291 			/*
1292 			 * 3. Read LMC3_DLL_CTL3 and wait for the result.
1293 			 */
1294 
1295 			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
1296 
1297 			/*
1298 			 * 4. Without changing any other fields in
1299 			 * LMC2_DLL_CTL3, write LMC2_DLL_CTL3[DCLK90_FWD] = 1
1300 			 * and LMC2_DLL_CTL3[DCLK90_RECAL_DIS] = 1.
1301 			 * Writing LMC2_DLL_CTL3[DCLK90_FWD] = 1 causes LMC2
1302 			 * to forward clock-delay information to LMC0. Setting
1303 			 * LMC2_DLL_CTL3[DCLK90_RECAL_DIS] to 1 prevents LMC2
1304 			 * from periodically recalibrating this delay
1305 			 * information.
1306 			 */
1307 
1308 			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(2));
1309 			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
1310 			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
1311 			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u64);
1312 
1313 			/*
1314 			 * 5. Without changing any other fields in
1315 			 * LMC3_DLL_CTL3, write LMC3_DLL_CTL3[DCLK90_FWD] = 1
1316 			 * and LMC3_DLL_CTL3[DCLK90_RECAL_DIS] = 1.
1317 			 * Writing LMC3_DLL_CTL3[DCLK90_FWD] = 1 causes LMC3
1318 			 * to forward clock-delay information to LMC1. Setting
1319 			 * LMC3_DLL_CTL3[DCLK90_RECAL_DIS] to 1 prevents LMC3
1320 			 * from periodically recalibrating this delay
1321 			 * information.
1322 			 */
1323 
1324 			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
1325 			ddr_dll_ctl3.cn78xx.dclk90_fwd = 1;
1326 			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 1;
1327 			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u64);
1328 
1329 			/*
1330 			 * 6. Read LMC3_DLL_CTL3 and wait for the result.
1331 			 */
1332 
1333 			lmc_rd(priv, CVMX_LMCX_DLL_CTL3(3));
1334 		}
1335 
1336 		if (octeon_is_cpuid(OCTEON_CNF75XX)) {
1337 			/*
1338 			 * cnf75xx 2-LMC Mode: LMC0 DRESET must occur after
1339 			 * Step 5, Do LMC0 for 1-LMC Mode here too
1340 			 */
1341 			cn78xx_lmc_dreset_init(priv, 0);
1342 		}
1343 
1344 		/* TWO-LMC MODE AFTER STEP 5 */
1345 		if (if_mask == 0x3) {
1346 			if (octeon_is_cpuid(OCTEON_CNF75XX)) {
1347 				/*
1348 				 * cnf75xx 2-LMC Mode: LMC0 DRESET must
1349 				 * occur after Step 5
1350 				 */
1351 				cn78xx_lmc_dreset_init(priv, 0);
1352 			} else {
1353 				cn78xx_lmc_dreset_init(priv, 1);
1354 			}
1355 		}
1356 
1357 		/* FOUR-LMC MODE AFTER STEP 5 */
1358 		if (if_mask == 0xf) {
1359 			cn78xx_lmc_dreset_init(priv, 0);
1360 			cn78xx_lmc_dreset_init(priv, 1);
1361 
1362 			/*
1363 			 * Enable periodic recalibration of the DDR90 delay
1364 			 * lines.
1365 			 */
1366 			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(0));
1367 			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 0;
1368 			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(0), ddr_dll_ctl3.u64);
1369 			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(1));
1370 			ddr_dll_ctl3.cn78xx.dclk90_recal_dis = 0;
1371 			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(1), ddr_dll_ctl3.u64);
1372 		}
1373 
1374 		/* Enable fine tune mode for all LMCs */
1375 		for (i = 0; i < 4; ++i) {
1376 			if ((if_mask & (1 << i)) == 0)
1377 				continue;
1378 			ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(i));
1379 			ddr_dll_ctl3.cn78xx.fine_tune_mode = 1;
1380 			lmc_wr(priv, CVMX_LMCX_DLL_CTL3(i), ddr_dll_ctl3.u64);
1381 		}
1382 
1383 		/*
1384 		 * Enable the trim circuit on the appropriate channels to
1385 		 * adjust the DDR clock duty cycle for chips that support
1386 		 * it
1387 		 */
1388 		if (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X) ||
1389 		    octeon_is_cpuid(OCTEON_CN73XX) ||
1390 		    octeon_is_cpuid(OCTEON_CNF75XX)) {
1391 			union cvmx_lmcx_phy_ctl lmc_phy_ctl;
1392 			int i;
1393 
1394 			for (i = 0; i < 4; ++i) {
1395 				if ((if_mask & (1 << i)) == 0)
1396 					continue;
1397 
1398 				lmc_phy_ctl.u64 =
1399 				    lmc_rd(priv, CVMX_LMCX_PHY_CTL(i));
1400 
1401 				if (octeon_is_cpuid(OCTEON_CNF75XX) ||
1402 				    octeon_is_cpuid(OCTEON_CN73XX_PASS1_3)) {
1403 					/* Both LMCs */
1404 					lmc_phy_ctl.s.lv_mode = 0;
1405 				} else {
1406 					/* Odd LMCs = 0, Even LMCs = 1 */
1407 					lmc_phy_ctl.s.lv_mode = (~i) & 1;
1408 				}
1409 
1410 				debug("LMC%d: PHY_CTL                                 : 0x%016llx\n",
1411 				      i, lmc_phy_ctl.u64);
1412 				lmc_wr(priv, CVMX_LMCX_PHY_CTL(i),
1413 				       lmc_phy_ctl.u64);
1414 			}
1415 		}
1416 	}
1417 
1418 	/*
1419 	 * 5.9.6 LMC RESET Initialization
1420 	 *
1421 	 * NOTE: this is now done as the first step in
1422 	 * init_octeon3_ddr3_interface, rather than the last step in clock
1423 	 * init. This reorg allows restarting per-LMC initialization should
1424 	 * problems be encountered, rather than being forced to resort to
1425 	 * resetting the chip and starting all over.
1426 	 *
1427 	 * Look for the code in octeon3_lmc.c: perform_lmc_reset().
1428 	 */
1429 
1430 	/* Fallthrough for all interfaces... */
1431 not_if0:
1432 
1433 	/*
1434 	 * Start the DDR clock so that its frequency can be measured.
1435 	 * For some chips we must activate the memory controller with
1436 	 * init_start to make the DDR clock start to run.
1437 	 */
1438 	if ((!octeon_is_cpuid(OCTEON_CN6XXX)) &&
1439 	    (!octeon_is_cpuid(OCTEON_CNF7XXX)) &&
1440 	    (!octeon_is_cpuid(OCTEON_CN7XXX))) {
1441 		union cvmx_lmcx_mem_cfg0 mem_cfg0;
1442 
1443 		mem_cfg0.u64 = 0;
1444 		mem_cfg0.s.init_start = 1;
1445 		lmc_wr(priv, CVMX_LMCX_MEM_CFG0(if_num), mem_cfg0.u64);
1446 		lmc_rd(priv, CVMX_LMCX_MEM_CFG0(if_num));
1447 	}
1448 
1449 	set_ddr_clock_initialized(priv, if_num, 1);
1450 
1451 	return 0;
1452 }
1453 
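/*
 * Busy-wait for the given number of cycles by polling the free-running
 * IPD or FPA clock counters (CVMX_IPD_CLK_COUNT / CVMX_FPA_CLK_COUNT).
 * These helpers time the sampling window used below to measure the
 * actual DDR clock rate.
 */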
1454 static void octeon_ipd_delay_cycles(u64 cycles)
1455 {
1456 	u64 start = csr_rd(CVMX_IPD_CLK_COUNT);
1457 
1458 	while (start + cycles > csr_rd(CVMX_IPD_CLK_COUNT))
1459 		;
1460 }
1461 
1462 static void octeon_ipd_delay_cycles_o3(u64 cycles)
1463 {
1464 	u64 start = csr_rd(CVMX_FPA_CLK_COUNT);
1465 
1466 	while (start + cycles > csr_rd(CVMX_FPA_CLK_COUNT))
1467 		;
1468 }
1469 
1470 static u32 measure_octeon_ddr_clock(struct ddr_priv *priv,
1471 				    struct ddr_conf *ddr_conf, u32 cpu_hertz,
1472 				    u32 ddr_hertz, u32 ddr_ref_hertz,
1473 				    int if_num, u32 if_mask)
1474 {
1475 	u64 core_clocks;
1476 	u64 ddr_clocks;
1477 	u64 calc_ddr_hertz;
1478 
1479 	if (ddr_conf) {
1480 		if (initialize_ddr_clock(priv, ddr_conf, cpu_hertz,
1481 					 ddr_hertz, ddr_ref_hertz, if_num,
1482 					 if_mask) != 0)
1483 			return 0;
1484 	}
1485 
1486 	/* Dynamically determine the DDR clock speed */
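	/*
	 * Sample the free-running cycle counter and LMC's DCLK_CNT, let a
	 * fixed window of cycles elapse, and compute the rate from the two
	 * deltas:
	 *
	 *   calc_ddr_hertz = ddr_clocks * ref_clock / core_clocks
	 *
	 * where ref_clock is gd->bus_clk (or cpu_hertz on older parts).
	 */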
1487 	if (OCTEON_IS_OCTEON2() || octeon_is_cpuid(OCTEON_CN70XX)) {
1488 		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT);
1489 		ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num));
1490 		/* How many cpu cycles to measure over */
1491 		octeon_ipd_delay_cycles(100000000);
1492 		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT) - core_clocks;
1493 		ddr_clocks =
1494 		    lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num)) - ddr_clocks;
1495 		calc_ddr_hertz = ddr_clocks * gd->bus_clk / core_clocks;
1496 	} else if (octeon_is_cpuid(OCTEON_CN7XXX)) {
1497 		core_clocks = csr_rd(CVMX_FPA_CLK_COUNT);
1498 		ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num));
1499 		/* How many cpu cycles to measure over */
1500 		octeon_ipd_delay_cycles_o3(100000000);
1501 		core_clocks = csr_rd(CVMX_FPA_CLK_COUNT) - core_clocks;
1502 		ddr_clocks =
1503 		    lmc_rd(priv, CVMX_LMCX_DCLK_CNT(if_num)) - ddr_clocks;
1504 		calc_ddr_hertz = ddr_clocks * gd->bus_clk / core_clocks;
1505 	} else {
1506 		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT);
1507 		/*
1508 		 * ignore overflow, starts counting when we enable the
1509 		 * controller
1510 		 */
1511 		ddr_clocks = lmc_rd(priv, CVMX_LMCX_DCLK_CNT_LO(if_num));
1512 		/* How many cpu cycles to measure over */
1513 		octeon_ipd_delay_cycles(100000000);
1514 		core_clocks = csr_rd(CVMX_IPD_CLK_COUNT) - core_clocks;
1515 		ddr_clocks =
1516 		    lmc_rd(priv, CVMX_LMCX_DCLK_CNT_LO(if_num)) - ddr_clocks;
1517 		calc_ddr_hertz = ddr_clocks * cpu_hertz / core_clocks;
1518 	}
1519 
1520 	debug("core clocks: %llu, ddr clocks: %llu, calc rate: %llu\n",
1521 	      core_clocks, ddr_clocks, calc_ddr_hertz);
1522 	debug("LMC%d: Measured DDR clock: %lld, cpu clock: %u, ddr clocks: %llu\n",
1523 	      if_num, calc_ddr_hertz, cpu_hertz, ddr_clocks);
1524 
1525 	/* Check for unreasonable settings. */
1526 	if (calc_ddr_hertz < 10000) {
1527 		udelay(8000000 * 100);
1528 		printf("DDR clock misconfigured on interface %d. Resetting...\n",
1529 		       if_num);
1530 		do_reset(NULL, 0, 0, NULL);
1531 	}
1532 
1533 	return calc_ddr_hertz;
1534 }
1535 
1536 u64 lmc_ddr3_rl_dbg_read(struct ddr_priv *priv, int if_num, int idx)
1537 {
1538 	union cvmx_lmcx_rlevel_dbg rlevel_dbg;
1539 	union cvmx_lmcx_rlevel_ctl rlevel_ctl;
1540 
1541 	rlevel_ctl.u64 = lmc_rd(priv, CVMX_LMCX_RLEVEL_CTL(if_num));
1542 	rlevel_ctl.s.byte = idx;
1543 
1544 	lmc_wr(priv, CVMX_LMCX_RLEVEL_CTL(if_num), rlevel_ctl.u64);
1545 	lmc_rd(priv, CVMX_LMCX_RLEVEL_CTL(if_num));
1546 
1547 	rlevel_dbg.u64 = lmc_rd(priv, CVMX_LMCX_RLEVEL_DBG(if_num));
1548 	return rlevel_dbg.s.bitmask;
1549 }
1550 
1551 u64 lmc_ddr3_wl_dbg_read(struct ddr_priv *priv, int if_num, int idx)
1552 {
1553 	union cvmx_lmcx_wlevel_dbg wlevel_dbg;
1554 
1555 	wlevel_dbg.u64 = 0;
1556 	wlevel_dbg.s.byte = idx;
1557 
1558 	lmc_wr(priv, CVMX_LMCX_WLEVEL_DBG(if_num), wlevel_dbg.u64);
1559 	lmc_rd(priv, CVMX_LMCX_WLEVEL_DBG(if_num));
1560 
1561 	wlevel_dbg.u64 = lmc_rd(priv, CVMX_LMCX_WLEVEL_DBG(if_num));
1562 	return wlevel_dbg.s.bitmask;
1563 }
1564 
1565 int validate_ddr3_rlevel_bitmask(struct rlevel_bitmask *rlevel_bitmask_p,
1566 				 int ddr_type)
1567 {
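	/*
	 * Score a read-leveling response bitmask: find the widest run of
	 * contiguous 1s, then accumulate penalties for a blank mask, a
	 * window narrower than 4 bits, bubble (0) bits before or after the
	 * chosen window, stray trailing 1s and, for DDR4 only, a run longer
	 * than the maximum mask.  A nonzero return marks the bitmask as
	 * invalid.
	 */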
1568 	int i;
1569 	int errors = 0;
1570 	u64 mask = 0;		/* Used in 64-bit comparisons */
1571 	u8 mstart = 0;
1572 	u8 width = 0;
1573 	u8 firstbit = 0;
1574 	u8 lastbit = 0;
1575 	u8 bubble = 0;
1576 	u8 tbubble = 0;
1577 	u8 blank = 0;
1578 	u8 narrow = 0;
1579 	u8 trailing = 0;
1580 	u64 bitmask = rlevel_bitmask_p->bm;
1581 	u8 extras = 0;
1582 	u8 toolong = 0;
1583 	u64 temp;
1584 
1585 	if (bitmask == 0) {
1586 		blank += RLEVEL_BITMASK_BLANK_ERROR;
1587 	} else {
1588 		/* Look for fb, the first bit */
1589 		temp = bitmask;
1590 		while (!(temp & 1)) {
1591 			firstbit++;
1592 			temp >>= 1;
1593 		}
1594 
1595 		/* Look for lb, the last bit */
1596 		lastbit = firstbit;
1597 		while ((temp >>= 1))
1598 			lastbit++;
1599 
1600 		/*
1601 		 * Start with the max range to try to find the largest mask
1602 		 * within the bitmask data
1603 		 */
1604 		width = MASKRANGE_BITS;
1605 		for (mask = MASKRANGE; mask > 0; mask >>= 1, --width) {
1606 			for (mstart = lastbit - width + 1; mstart >= firstbit;
1607 			     --mstart) {
1608 				temp = mask << mstart;
1609 				if ((bitmask & temp) == temp)
1610 					goto done_now;
1611 			}
1612 		}
1613 done_now:
1614 		/* look for any more contiguous 1's to the right of mstart */
1615 		if (width == MASKRANGE_BITS) {	// only when maximum mask
1616 			while ((bitmask >> (mstart - 1)) & 1) {
1617 				// slide right over more 1's
1618 				--mstart;
1619 				// count the number of extra bits only for DDR4
1620 				if (ddr_type == DDR4_DRAM)
1621 					extras++;
1622 			}
1623 		}
1624 
1625 		/* Penalize any extra 1's beyond the maximum desired mask */
1626 		if (extras > 0)
1627 			toolong =
1628 			    RLEVEL_BITMASK_TOOLONG_ERROR * ((1 << extras) - 1);
1629 
1630 		/* Detect if bitmask is too narrow. */
1631 		if (width < 4)
1632 			narrow = (4 - width) * RLEVEL_BITMASK_NARROW_ERROR;
1633 
1634 		/*
1635 		 * detect leading bubble bits, that is, any 0's between first
1636 		 * and mstart
1637 		 */
1638 		temp = bitmask >> (firstbit + 1);
1639 		i = mstart - firstbit - 1;
1640 		while (--i >= 0) {
1641 			if ((temp & 1) == 0)
1642 				bubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
1643 			temp >>= 1;
1644 		}
1645 
1646 		temp = bitmask >> (mstart + width + extras);
1647 		i = lastbit - (mstart + width + extras - 1);
1648 		while (--i >= 0) {
1649 			if (temp & 1) {
1650 				/*
1651 				 * Detect 1 bits after the trailing end of
1652 				 * the mask, including last.
1653 				 */
1654 				trailing += RLEVEL_BITMASK_TRAILING_BITS_ERROR;
1655 			} else {
1656 				/*
1657 				 * Detect trailing bubble bits, that is,
1658 				 * any 0's between end-of-mask and last
1659 				 */
1660 				tbubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
1661 			}
1662 			temp >>= 1;
1663 		}
1664 	}
1665 
1666 	errors = bubble + tbubble + blank + narrow + trailing + toolong;
1667 
1668 	/* Pass out useful statistics */
1669 	rlevel_bitmask_p->mstart = mstart;
1670 	rlevel_bitmask_p->width = width;
1671 
1672 	debug_bitmask_print("bm:%08lx mask:%02lx, width:%2u, mstart:%2d, fb:%2u, lb:%2u (bu:%2d, tb:%2d, bl:%2d, n:%2d, t:%2d, x:%2d) errors:%3d %s\n",
1673 			    (unsigned long)bitmask, mask, width, mstart,
1674 			    firstbit, lastbit, bubble, tbubble, blank,
1675 			    narrow, trailing, toolong, errors,
1676 			    (errors) ? "=> invalid" : "");
1677 
1678 	return errors;
1679 }
1680 
1681 int compute_ddr3_rlevel_delay(u8 mstart, u8 width,
1682 			      union cvmx_lmcx_rlevel_ctl rlevel_ctl)
1683 {
1684 	int delay;
1685 
1686 	debug_bitmask_print("  offset_en:%d", rlevel_ctl.s.offset_en);
1687 
1688 	if (rlevel_ctl.s.offset_en) {
1689 		delay = max((int)mstart,
1690 			    (int)(mstart + width - 1 - rlevel_ctl.s.offset));
1691 	} else {
1692 		/* if (rlevel_ctl.s.offset) { *//* Experimental */
1693 		if (0) {
1694 			delay = max(mstart + rlevel_ctl.s.offset, mstart + 1);
1695 			/*
1696 			 * Ensure that the offset delay falls within the
1697 			 * bitmask
1698 			 */
1699 			delay = min(delay, mstart + width - 1);
1700 		} else {
1701 			/* Round down */
1702 			delay = (width - 1) / 2 + mstart;
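			/*
			 * i.e. pick the center of the passing window,
			 * e.g. mstart = 5, width = 7 -> delay = 8.
			 */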
1703 		}
1704 	}
1705 
1706 	return delay;
1707 }
1708 
1709 /* Default ODT config must disable ODT */
1710 /* Must be const (read only) so that the structure is in flash */
1711 const struct dimm_odt_config disable_odt_config[] = {
1712 	/*   1 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
1713 	/*   2 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
1714 	/*   3 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
1715 	/*   4 */ { 0, 0x0000, {.u64 = 0x0000}, {.u64 = 0x0000}, 0, 0x0000, 0 },
1716 };
1717 
1718 /* Memory controller setup function */
1719 static int init_octeon_dram_interface(struct ddr_priv *priv,
1720 				      struct ddr_conf *ddr_conf,
1721 				      u32 ddr_hertz, u32 cpu_hertz,
1722 				      u32 ddr_ref_hertz, int if_num,
1723 				      u32 if_mask)
1724 {
1725 	u32 mem_size_mbytes = 0;
1726 	char *s;
1727 
1728 	s = lookup_env(priv, "ddr_timing_hertz");
1729 	if (s)
1730 		ddr_hertz = simple_strtoul(s, NULL, 0);
1731 
1732 	if (OCTEON_IS_OCTEON3()) {
1733 		int lmc_restart_retries = 0;
1734 #define DEFAULT_RESTART_RETRIES 3
1735 		int lmc_restart_retries_limit = DEFAULT_RESTART_RETRIES;
1736 
1737 		s = lookup_env(priv, "ddr_restart_retries_limit");
1738 		if (s)
1739 			lmc_restart_retries_limit = simple_strtoul(s, NULL, 0);
1740 
1741 restart_lmc_init:
1742 		mem_size_mbytes = init_octeon3_ddr3_interface(priv, ddr_conf,
1743 							      ddr_hertz,
1744 							      cpu_hertz,
1745 							      ddr_ref_hertz,
1746 							      if_num, if_mask);
1747 		if (mem_size_mbytes == 0) {	// 0 means restart is possible
1748 			if (lmc_restart_retries < lmc_restart_retries_limit) {
1749 				lmc_restart_retries++;
1750 				printf("N0.LMC%d Configuration problem: attempting LMC reset and init restart %d\n",
1751 				       if_num, lmc_restart_retries);
1752 				goto restart_lmc_init;
1753 			} else {
1754 				if (lmc_restart_retries_limit > 0) {
1755 					printf("INFO: N0.LMC%d Configuration: fatal problem remains after %d LMC init retries - Resetting node...\n",
1756 					       if_num, lmc_restart_retries);
1757 					mdelay(500);
1758 					do_reset(NULL, 0, 0, NULL);
1759 				} else {
1760 					// return an error, no restart
1761 					mem_size_mbytes = -1;
1762 				}
1763 			}
1764 		}
1765 	}
1766 
1767 	debug("N0.LMC%d Configuration Completed: %d MB\n",
1768 	      if_num, mem_size_mbytes);
1769 
1770 	return mem_size_mbytes;
1771 }
1772 
1773 #define WLEVEL_BYTE_BITS	5
1774 #define WLEVEL_BYTE_MSK		((1ULL << 5) - 1)
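/*
 * The write-leveling rank register packs one 5-bit delay per byte lane,
 * so byte N occupies bits [5N+4:5N]; e.g. byte 2 lives in bits [14:10].
 */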
1775 
1776 void upd_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank,
1777 		 int byte, int delay)
1778 {
1779 	union cvmx_lmcx_wlevel_rankx temp_wlevel_rank;
1780 
1781 	if (byte >= 0 && byte <= 8) {
1782 		temp_wlevel_rank.u64 = lmc_wlevel_rank->u64;
1783 		temp_wlevel_rank.u64 &=
1784 		    ~(WLEVEL_BYTE_MSK << (WLEVEL_BYTE_BITS * byte));
1785 		temp_wlevel_rank.u64 |=
1786 		    ((delay & WLEVEL_BYTE_MSK) << (WLEVEL_BYTE_BITS * byte));
1787 		lmc_wlevel_rank->u64 = temp_wlevel_rank.u64;
1788 	}
1789 }
1790 
1791 int get_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte)
1792 {
1793 	int delay = 0;
1794 
1795 	if (byte >= 0 && byte <= 8)
1796 		delay =
1797 		    ((lmc_wlevel_rank->u64) >> (WLEVEL_BYTE_BITS *
1798 						byte)) & WLEVEL_BYTE_MSK;
1799 
1800 	return delay;
1801 }
1802 
1803 void upd_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank,
1804 		 int byte, int delay)
1805 {
1806 	union cvmx_lmcx_rlevel_rankx temp_rlevel_rank;
1807 
1808 	if (byte >= 0 && byte <= 8) {
1809 		temp_rlevel_rank.u64 =
1810 		    lmc_rlevel_rank->u64 & ~(RLEVEL_BYTE_MSK <<
1811 					     (RLEVEL_BYTE_BITS * byte));
1812 		temp_rlevel_rank.u64 |=
1813 		    ((delay & RLEVEL_BYTE_MSK) << (RLEVEL_BYTE_BITS * byte));
1814 		lmc_rlevel_rank->u64 = temp_rlevel_rank.u64;
1815 	}
1816 }
1817 
1818 int get_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank, int byte)
1819 {
1820 	int delay = 0;
1821 
1822 	if (byte >= 0 && byte <= 8)
1823 		delay =
1824 		    ((lmc_rlevel_rank->u64) >> (RLEVEL_BYTE_BITS *
1825 						byte)) & RLEVEL_BYTE_MSK;
1826 
1827 	return delay;
1828 }
1829 
1830 void rlevel_to_wlevel(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank,
1831 		      union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte)
1832 {
1833 	int byte_delay = get_rl_rank(lmc_rlevel_rank, byte);
1834 
1835 	debug("Estimating Wlevel delay byte %d: ", byte);
1836 	debug("Rlevel=%d => ", byte_delay);
1837 	byte_delay = divide_roundup(byte_delay, 2) & 0x1e;
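	/*
	 * e.g. an Rlevel delay of 13 estimates as divide_roundup(13, 2) = 7,
	 * masked down to the even Wlevel setting 6.  (Illustrative value.)
	 */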
1838 	debug("Wlevel=%d\n", byte_delay);
1839 	upd_wl_rank(lmc_wlevel_rank, byte, byte_delay);
1840 }
1841 
1842 /* Delay trend: constant=0, decreasing=-1, increasing=1 */
1843 static s64 calc_delay_trend(s64 v)
1844 {
1845 	if (v == 0)
1846 		return 0;
1847 	if (v < 0)
1848 		return -1;
1849 
1850 	return 1;
1851 }
1852 
1853 /*
1854  * Evaluate delay sequence across the whole range of byte delays while
1855  * keeping track of the overall delay trend, increasing or decreasing.
1856  * If the trend changes charge an error amount to the score.
1857  */
1858 
1859 // NOTE: "max_adj_delay_inc" argument is, by default, 1 for DDR3 and 2 for DDR4
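//
// Worked example (illustrative): byte delays 4, 6, 5 with
// max_adj_delay_inc = 1 score one RLEVEL_ADJACENT_DELAY_ERROR for the
// +2 step (one unit above the limit) and one
// RLEVEL_NONSEQUENTIAL_DELAY_ERROR when the trend flips from increasing
// to decreasing.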
1860 
1861 int nonseq_del(struct rlevel_byte_data *rlevel_byte, int start, int end,
1862 	       int max_adj_delay_inc)
1863 {
1864 	s64 error = 0;
1865 	s64 delay_trend, prev_trend = 0;
1866 	int byte_idx;
1867 	s64 seq_err;
1868 	s64 adj_err;
1869 	s64 delay_inc;
1870 	s64 delay_diff;
1871 
1872 	for (byte_idx = start; byte_idx < end; ++byte_idx) {
1873 		delay_diff = rlevel_byte[byte_idx + 1].delay -
1874 			rlevel_byte[byte_idx].delay;
1875 		delay_trend = calc_delay_trend(delay_diff);
1876 
1877 		/*
1878 		 * Increment error each time the trend changes to the
1879 		 * opposite direction.
1880 		 */
1881 		if (prev_trend != 0 && delay_trend != 0 &&
1882 		    prev_trend != delay_trend) {
1883 			seq_err = RLEVEL_NONSEQUENTIAL_DELAY_ERROR;
1884 		} else {
1885 			seq_err = 0;
1886 		}
1887 
1888 		// how big was the delay change, if any
1889 		delay_inc = abs(delay_diff);
1890 
1891 		/*
1892 		 * Even if the trend did not change to the opposite direction,
1893 		 * check for the magnitude of the change, and scale the
1894 		 * penalty by the amount that the size is larger than the
1895 		 * provided limit.
1896 		 */
1897 		if (max_adj_delay_inc != 0 && delay_inc > max_adj_delay_inc) {
1898 			adj_err = (delay_inc - max_adj_delay_inc) *
1899 				RLEVEL_ADJACENT_DELAY_ERROR;
1900 		} else {
1901 			adj_err = 0;
1902 		}
1903 
1904 		rlevel_byte[byte_idx + 1].sqerrs = seq_err + adj_err;
1905 		error += seq_err + adj_err;
1906 
1907 		debug_bitmask_print("Byte %d: %d, Byte %d: %d, delay_trend: %ld, prev_trend: %ld, [%ld/%ld]%s%s\n",
1908 				    byte_idx + 0,
1909 				    rlevel_byte[byte_idx + 0].delay,
1910 				    byte_idx + 1,
1911 				    rlevel_byte[byte_idx + 1].delay,
1912 				    delay_trend,
1913 				    prev_trend, seq_err, adj_err,
1914 				    (seq_err) ?
1915 				    " => Nonsequential byte delay" : "",
1916 				    (adj_err) ?
1917 				    " => Adjacent delay error" : "");
1918 
1919 		if (delay_trend != 0)
1920 			prev_trend = delay_trend;
1921 	}
1922 
1923 	return (int)error;
1924 }
1925 
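/*
 * Pick a write-leveling delay from the feedback bitmask: skip over any
 * set-bit run that wraps around bit 0, find the first 0 -> 1 transition,
 * then round odd positions up to the next even setting (modulo 8);
 * e.g. 0x0e -> first set bit at 1 -> delay 2, 0xf0 -> first set bit
 * at 4 -> delay 4.
 */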
1926 int roundup_ddr3_wlevel_bitmask(int bitmask)
1927 {
1928 	int shifted_bitmask;
1929 	int leader;
1930 	int delay;
1931 
1932 	for (leader = 0; leader < 8; ++leader) {
1933 		shifted_bitmask = (bitmask >> leader);
1934 		if ((shifted_bitmask & 1) == 0)
1935 			break;
1936 	}
1937 
1938 	for (; leader < 16; ++leader) {
1939 		shifted_bitmask = (bitmask >> (leader % 8));
1940 		if (shifted_bitmask & 1)
1941 			break;
1942 	}
1943 
1944 	delay = (leader & 1) ? leader + 1 : leader;
1945 	delay = delay % 8;
1946 
1947 	return delay;
1948 }
1949 
1950 /* Octeon 2 */
1951 static void oct2_ddr3_seq(struct ddr_priv *priv, int rank_mask, int if_num,
1952 			  int sequence)
1953 {
1954 	char *s;
1955 
1956 #ifdef DEBUG_PERFORM_DDR3_SEQUENCE
1957 	static const char * const sequence_str[] = {
1958 		"power-up/init",
1959 		"read-leveling",
1960 		"self-refresh entry",
1961 		"self-refresh exit",
1962 		"precharge power-down entry",
1963 		"precharge power-down exit",
1964 		"write-leveling",
1965 		"illegal"
1966 	};
1967 #endif
1968 
1969 	union cvmx_lmcx_control lmc_control;
1970 	union cvmx_lmcx_config lmc_config;
1971 	int save_ddr2t;
1972 
1973 	lmc_control.u64 = lmc_rd(priv, CVMX_LMCX_CONTROL(if_num));
1974 	save_ddr2t = lmc_control.s.ddr2t;
1975 
1976 	if (save_ddr2t == 0 && octeon_is_cpuid(OCTEON_CN63XX_PASS1_X)) {
1977 		/* Some register parts (IDT and TI included) do not like
1978 		 * the sequence that LMC generates for an MRS register
1979 		 * write in 1T mode. In this case, the register part does
1980 		 * not properly forward the MRS register write to the DRAM
1981 		 * parts.  See errata (LMC-14548) Issues with registered
1982 		 * DIMMs.
1983 		 */
1984 		debug("Forcing DDR 2T during init seq. Re: Pass 1 LMC-14548\n");
1985 		lmc_control.s.ddr2t = 1;
1986 	}
1987 
1988 	s = lookup_env(priv, "ddr_init_2t");
1989 	if (s)
1990 		lmc_control.s.ddr2t = simple_strtoul(s, NULL, 0);
1991 
1992 	lmc_wr(priv, CVMX_LMCX_CONTROL(if_num), lmc_control.u64);
1993 
1994 	lmc_config.u64 = lmc_rd(priv, CVMX_LMCX_CONFIG(if_num));
1995 
1996 	lmc_config.s.init_start = 1;
1997 	if (OCTEON_IS_OCTEON2())
1998 		lmc_config.cn63xx.sequence = sequence;
1999 	lmc_config.s.rankmask = rank_mask;
2000 
2001 #ifdef DEBUG_PERFORM_DDR3_SEQUENCE
2002 	debug("Performing LMC sequence: rank_mask=0x%02x, sequence=%d, %s\n",
2003 	      rank_mask, sequence, sequence_str[sequence]);
2004 #endif
2005 
2006 	lmc_wr(priv, CVMX_LMCX_CONFIG(if_num), lmc_config.u64);
2007 	lmc_rd(priv, CVMX_LMCX_CONFIG(if_num));
2008 	udelay(600);		/* Wait a while */
2009 
2010 	lmc_control.s.ddr2t = save_ddr2t;
2011 	lmc_wr(priv, CVMX_LMCX_CONTROL(if_num), lmc_control.u64);
2012 	lmc_rd(priv, CVMX_LMCX_CONTROL(if_num));
2013 }
2014 
2015 /* Check to see if any custom offset values are used */
2016 static int is_dll_offset_provided(const int8_t *dll_offset_table)
2017 {
2018 	int i;
2019 
2020 	if (!dll_offset_table)	/* Check for pointer to table. */
2021 		return 0;
2022 
2023 	for (i = 0; i < 9; ++i) {
2024 		if (dll_offset_table[i] != 0)
2025 			return 1;
2026 	}
2027 
2028 	return 0;
2029 }
2030 
2031 void change_dll_offset_enable(struct ddr_priv *priv, int if_num, int change)
2032 {
2033 	union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
2034 
2035 	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
2036 	SET_DDR_DLL_CTL3(offset_ena, !!change);
2037 	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
2038 	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
2039 }
2040 
2041 unsigned short load_dll_offset(struct ddr_priv *priv, int if_num,
2042 			       int dll_offset_mode, int byte_offset, int byte)
2043 {
2044 	union cvmx_lmcx_dll_ctl3 ddr_dll_ctl3;
2045 	int field_width = 6;
2046 	/*
2047 	 * byte_sel:
2048 	 * 0x1 = byte 0, ..., 0x9 = byte 8
2049 	 * 0xA = all bytes
2050 	 */
2051 	int byte_sel = (byte == 10) ? byte : byte + 1;
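	/* e.g. byte = 3 selects byte_sel 0x4; byte = 10 keeps 0xa (all bytes) */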
2052 
2053 	if (octeon_is_cpuid(OCTEON_CN6XXX))
2054 		field_width = 5;
2055 
2056 	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
2057 	SET_DDR_DLL_CTL3(load_offset, 0);
2058 	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
2059 	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
2060 
2061 	SET_DDR_DLL_CTL3(mode_sel, dll_offset_mode);
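	/*
	 * The offset field is sign-magnitude: with a 6-bit field width,
	 * byte_offset = -5 encodes as (5 & 0x3f) | (1 << 6) = 0x45.
	 * (Illustrative value.)
	 */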
2062 	SET_DDR_DLL_CTL3(offset,
2063 			 (abs(byte_offset) & (~(-1 << field_width))) |
2064 			 (_sign(byte_offset) << field_width));
2065 	SET_DDR_DLL_CTL3(byte_sel, byte_sel);
2066 	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
2067 	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
2068 
2069 	SET_DDR_DLL_CTL3(load_offset, 1);
2070 	lmc_wr(priv, CVMX_LMCX_DLL_CTL3(if_num), ddr_dll_ctl3.u64);
2071 	ddr_dll_ctl3.u64 = lmc_rd(priv, CVMX_LMCX_DLL_CTL3(if_num));
2072 
2073 	return (unsigned short)GET_DDR_DLL_CTL3(offset);
2074 }
2075 
2076 void process_custom_dll_offsets(struct ddr_priv *priv, int if_num,
2077 				const char *enable_str,
2078 				const int8_t *offsets, const char *byte_str,
2079 				int mode)
2080 {
2081 	const char *s;
2082 	int enabled;
2083 	int provided;
2084 	int byte_offset;
2085 	unsigned short offset[9] = { 0 };
2086 	int byte;
2087 
2088 	s = lookup_env(priv, enable_str);
2089 	if (s)
2090 		enabled = !!simple_strtol(s, NULL, 0);
2091 	else
2092 		enabled = -1;
2093 
2094 	/*
2095 	 * enabled == -1: no override, do only configured offsets if provided
2096 	 * enabled ==  0: override OFF, do NOT do it even if configured
2097 	 *                offsets provided
2098 	 * enabled ==  1: override ON, do it for overrides plus configured
2099 	 *                offsets
2100 	 */
2101 
2102 	if (enabled == 0)
2103 		return;
2104 
2105 	provided = is_dll_offset_provided(offsets);
2106 
2107 	if (enabled < 0 && !provided)
2108 		return;
2109 
2110 	change_dll_offset_enable(priv, if_num, 0);
2111 
2112 	for (byte = 0; byte < 9; ++byte) {
2113 		// always take the provided, if available
2114 		byte_offset = (provided) ? offsets[byte] : 0;
2115 
2116 		// then, if enabled, use any overrides present
2117 		if (enabled > 0) {
2118 			s = lookup_env(priv, byte_str, if_num, byte);
2119 			if (s)
2120 				byte_offset = simple_strtol(s, NULL, 0);
2121 		}
2122 
2123 		offset[byte] =
2124 		    load_dll_offset(priv, if_num, mode, byte_offset, byte);
2125 	}
2126 
2127 	change_dll_offset_enable(priv, if_num, 1);
2128 
2129 	debug("N0.LMC%d: DLL %s Offset 8:0       :  0x%02x  0x%02x  0x%02x  0x%02x  0x%02x  0x%02x  0x%02x  0x%02x  0x%02x\n",
2130 	      if_num, (mode == 2) ? "Read " : "Write",
2131 	      offset[8], offset[7], offset[6], offset[5], offset[4],
2132 	      offset[3], offset[2], offset[1], offset[0]);
2133 }
2134 
2135 void ddr_init_seq(struct ddr_priv *priv, int rank_mask, int if_num)
2136 {
2137 	char *s;
2138 	int ddr_init_loops = 1;
2139 	int rankx;
2140 
2141 	s = lookup_env(priv, "ddr%d_init_loops", if_num);
2142 	if (s)
2143 		ddr_init_loops = simple_strtoul(s, NULL, 0);
2144 
2145 	while (ddr_init_loops--) {
2146 		for (rankx = 0; rankx < 8; rankx++) {
2147 			if (!(rank_mask & (1 << rankx)))
2148 				continue;
2149 
2150 			if (OCTEON_IS_OCTEON3()) {
2151 				/* power-up/init */
2152 				oct3_ddr3_seq(priv, 1 << rankx, if_num, 0);
2153 			} else {
2154 				/* power-up/init */
2155 				oct2_ddr3_seq(priv, 1 << rankx, if_num, 0);
2156 			}
2157 
2158 			udelay(1000);	/* Wait a while. */
2159 
2160 			s = lookup_env(priv, "ddr_sequence1");
2161 			if (s) {
2162 				int sequence1;
2163 
2164 				sequence1 = simple_strtoul(s, NULL, 0);
2165 
2166 				if (OCTEON_IS_OCTEON3()) {
2167 					oct3_ddr3_seq(priv, 1 << rankx,
2168 						      if_num, sequence1);
2169 				} else {
2170 					oct2_ddr3_seq(priv, 1 << rankx,
2171 						      if_num, sequence1);
2172 				}
2173 			}
2174 
2175 			s = lookup_env(priv, "ddr_sequence2");
2176 			if (s) {
2177 				int sequence2;
2178 
2179 				sequence2 = simple_strtoul(s, NULL, 0);
2180 
2181 				if (OCTEON_IS_OCTEON3())
2182 					oct3_ddr3_seq(priv, 1 << rankx,
2183 						      if_num, sequence2);
2184 				else
2185 					oct2_ddr3_seq(priv, 1 << rankx,
2186 						      if_num, sequence2);
2187 			}
2188 		}
2189 	}
2190 }
2191 
2192 static int octeon_ddr_initialize(struct ddr_priv *priv, u32 cpu_hertz,
2193 				 u32 ddr_hertz, u32 ddr_ref_hertz,
2194 				 u32 if_mask,
2195 				 struct ddr_conf *ddr_conf,
2196 				 u32 *measured_ddr_hertz)
2197 {
2198 	u32 ddr_conf_valid_mask = 0;
2199 	int memsize_mbytes = 0;
2200 	char *eptr;
2201 	int if_idx;
2202 	u32 ddr_max_speed = 667000000;
2203 	u32 calc_ddr_hertz = -1;
2204 	int val;
2205 	int ret;
2206 
2207 	if (env_get("ddr_verbose") || env_get("ddr_prompt"))
2208 		priv->flags |= FLAG_DDR_VERBOSE;
2209 
2210 #ifdef DDR_VERBOSE
2211 	priv->flags |= FLAG_DDR_VERBOSE;
2212 #endif
2213 
2214 	if (env_get("ddr_trace_init")) {
2215 		printf("Parameter ddr_trace_init found in environment.\n");
2216 		priv->flags |= FLAG_DDR_TRACE_INIT;
2217 		priv->flags |= FLAG_DDR_VERBOSE;
2218 	}
2219 
2220 	priv->flags |= FLAG_DDR_DEBUG;
2221 
2222 	val = env_get_ulong("ddr_debug", 10, (u32)-1);
2223 	switch (val) {
2224 	case 0:
2225 		priv->flags &= ~FLAG_DDR_DEBUG;
2226 		printf("Parameter ddr_debug clear in environment\n");
2227 		break;
2228 	case (u32)-1:
2229 		break;
2230 	default:
2231 		printf("Parameter ddr_debug set in environment\n");
2232 		priv->flags |= FLAG_DDR_DEBUG;
2233 		priv->flags |= FLAG_DDR_VERBOSE;
2234 		break;
2235 	}
2236 	if (env_get("ddr_prompt"))
2237 		priv->flags |= FLAG_DDR_PROMPT;
2238 
2239 	/* Force ddr_verbose for failsafe debugger */
2240 	if (priv->flags & FLAG_FAILSAFE_MODE)
2241 		priv->flags |= FLAG_DDR_VERBOSE;
2242 
2243 #ifdef DDR_DEBUG
2244 	priv->flags |= FLAG_DDR_DEBUG;
2245 	/* Keep verbose on while we are still debugging. */
2246 	priv->flags |= FLAG_DDR_VERBOSE;
2247 #endif
2248 
2249 	if ((octeon_is_cpuid(OCTEON_CN61XX) ||
2250 	     octeon_is_cpuid(OCTEON_CNF71XX)) && ddr_max_speed > 533333333) {
2251 		ddr_max_speed = 533333333;
2252 	} else if (octeon_is_cpuid(OCTEON_CN7XXX)) {
2253 		/* Override speed restrictions to support internal testing. */
2254 		ddr_max_speed = 1210000000;
2255 	}
2256 
2257 	if (ddr_hertz > ddr_max_speed) {
2258 		printf("DDR clock speed %u exceeds maximum supported DDR speed, reducing to %uHz\n",
2259 		       ddr_hertz, ddr_max_speed);
2260 		ddr_hertz = ddr_max_speed;
2261 	}
2262 
2263 	if (OCTEON_IS_OCTEON3()) {	// restrict check
2264 		if (ddr_hertz > cpu_hertz) {
2265 			printf("\nFATAL ERROR: DDR speed %u exceeds CPU speed %u, exiting...\n\n",
2266 			       ddr_hertz, cpu_hertz);
2267 			return -1;
2268 		}
2269 	}
2270 
2271 	/* Enable L2 ECC */
2272 	eptr = env_get("disable_l2_ecc");
2273 	if (eptr) {
2274 		printf("Disabling L2 ECC based on disable_l2_ecc environment variable\n");
2275 		union cvmx_l2c_ctl l2c_val;
2276 
2277 		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
2278 		l2c_val.s.disecc = 1;
2279 		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
2280 	} else {
2281 		union cvmx_l2c_ctl l2c_val;
2282 
2283 		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
2284 		l2c_val.s.disecc = 0;
2285 		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
2286 	}
2287 
2288 	/*
2289 	 * Init the L2C, must be done before DRAM access so that we
2290 	 * know L2 is empty
2291 	 */
2292 	eptr = env_get("disable_l2_index_aliasing");
2293 	if (eptr) {
2294 		union cvmx_l2c_ctl l2c_val;
2295 
2296 		puts("L2 index aliasing disabled.\n");
2297 
2298 		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
2299 		l2c_val.s.disidxalias = 1;
2300 		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
2301 	} else {
2302 		union cvmx_l2c_ctl l2c_val;
2303 
2304 		/* Enable L2C index aliasing */
2305 
2306 		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
2307 		l2c_val.s.disidxalias = 0;
2308 		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
2309 	}
2310 
2311 	if (OCTEON_IS_OCTEON3()) {
2312 		/*
2313 		 * rdf_cnt: Defines the sample point of the LMC response data in
2314 		 * the DDR-clock/core-clock crossing.  For optimal
2315 		 * performance set to 10 * (DDR-clock period/core-clock
2316 		 * period) - 1.  To disable set to 0. All other values
2317 		 * are reserved.
2318 		 */
2319 
2320 		union cvmx_l2c_ctl l2c_ctl;
2321 		u64 rdf_cnt;
2322 		char *s;
2323 
2324 		l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
2325 
2326 		/*
2327 		 * It is more convenient to compute the ratio using clock
2328 		 * frequencies rather than clock periods.
2329 		 */
2330 		rdf_cnt = (((u64)10 * cpu_hertz) / ddr_hertz) - 1;
2331 		rdf_cnt = rdf_cnt < 256 ? rdf_cnt : 255;
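		/*
		 * e.g. (illustrative) cpu_hertz = 1200 MHz and
		 * ddr_hertz = 800 MHz give rdf_cnt = (10 * 1200 / 800) - 1
		 * = 14, well under the 255 clamp.
		 */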
2332 		l2c_ctl.cn78xx.rdf_cnt = rdf_cnt;
2333 
2334 		s = lookup_env(priv, "early_fill_count");
2335 		if (s)
2336 			l2c_ctl.cn78xx.rdf_cnt = simple_strtoul(s, NULL, 0);
2337 
2338 		debug("%-45s : %d, cpu_hertz:%d, ddr_hertz:%d\n",
2339 		      "EARLY FILL COUNT  ", l2c_ctl.cn78xx.rdf_cnt, cpu_hertz,
2340 		      ddr_hertz);
2341 		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_ctl.u64);
2342 	}
2343 
2344 	/* Check for lower DIMM socket populated */
2345 	for (if_idx = 0; if_idx < 4; ++if_idx) {
2346 		if ((if_mask & (1 << if_idx)) &&
2347 		    validate_dimm(priv,
2348 				  &ddr_conf[(int)if_idx].dimm_config_table[0],
2349 				  0))
2350 			ddr_conf_valid_mask |= (1 << if_idx);
2351 	}
2352 
2353 	if (octeon_is_cpuid(OCTEON_CN68XX) || octeon_is_cpuid(OCTEON_CN78XX)) {
2354 		int four_lmc_mode = 1;
2355 		char *s;
2356 
2357 		if (priv->flags & FLAG_FAILSAFE_MODE)
2358 			four_lmc_mode = 0;
2359 
2360 		/* Pass 1.0 disables four-LMC mode.
2361 		 * See errata (LMC-15811)
2362 		 */
2363 		if (octeon_is_cpuid(OCTEON_CN68XX_PASS1_0))
2364 			four_lmc_mode = 0;
2365 
2366 		s = env_get("ddr_four_lmc");
2367 		if (s) {
2368 			four_lmc_mode = simple_strtoul(s, NULL, 0);
2369 			printf("Parameter found in environment. ddr_four_lmc = %d\n",
2370 			       four_lmc_mode);
2371 		}
2372 
2373 		if (!four_lmc_mode) {
2374 			puts("Forcing two-LMC Mode.\n");
2375 			/* Invalidate LMC[2:3] */
2376 			ddr_conf_valid_mask &= ~(3 << 2);
2377 		}
2378 	} else if (octeon_is_cpuid(OCTEON_CN73XX)) {
2379 		int one_lmc_mode = 0;
2380 		char *s;
2381 
2382 		s = env_get("ddr_one_lmc");
2383 		if (s) {
2384 			one_lmc_mode = simple_strtoul(s, NULL, 0);
2385 			printf("Parameter found in environment. ddr_one_lmc = %d\n",
2386 			       one_lmc_mode);
2387 		}
2388 
2389 		if (one_lmc_mode) {
2390 			puts("Forcing one-LMC Mode.\n");
2391 			/* Invalidate LMC1 */
2392 			ddr_conf_valid_mask &= ~(1 << 1);
2393 		}
2394 	}
2395 
2396 	if (!ddr_conf_valid_mask) {
2397 		printf("ERROR: No valid DIMMs detected on any DDR interface.\n");
2399 		hang();
2400 		return -1;	// test-only: no negative return
2401 	}
2402 
2403 	/*
2404 	 * We measure the DDR frequency by counting DDR clocks.  We can
2405 	 * confirm or adjust the expected frequency as necessary.  We use
2406 	 * the measured frequency to make accurate timing calculations
2407 	 * used to configure the controller.
2408 	 */
2409 	for (if_idx = 0; if_idx < 4; ++if_idx) {
2410 		u32 tmp_hertz;
2411 
2412 		if (!(ddr_conf_valid_mask & (1 << if_idx)))
2413 			continue;
2414 
2415 try_again:
2416 		/*
2417 		 * only check for alternate refclk wanted on chips that
2418 		 * support it
2419 		 */
2420 		if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
2421 		    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
2422 		    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
2423 			// only need do this if we are LMC0
2424 			if (if_idx == 0) {
2425 				union cvmx_lmcx_ddr_pll_ctl ddr_pll_ctl;
2426 
2427 				ddr_pll_ctl.u64 =
2428 				    lmc_rd(priv, CVMX_LMCX_DDR_PLL_CTL(0));
2429 
2430 				/*
2431 				 * If we are asking for 100 MHz refclk, we can
2432 				 * only get it via alternate, so switch to it
2433 				 */
2434 				if (ddr_ref_hertz == 100000000) {
2435 					ddr_pll_ctl.cn78xx.dclk_alt_refclk_sel =
2436 					    1;
2437 					lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0),
2438 					       ddr_pll_ctl.u64);
2439 					udelay(1000);	// wait 1 msec
2440 				} else {
2441 					/*
2442 					 * If we are NOT asking for 100MHz,
2443 					 * then reset to (assumed) 50MHz and go
2444 					 * on
2445 					 */
2446 					ddr_pll_ctl.cn78xx.dclk_alt_refclk_sel =
2447 					    0;
2448 					lmc_wr(priv, CVMX_LMCX_DDR_PLL_CTL(0),
2449 					       ddr_pll_ctl.u64);
2450 					udelay(1000);	// wait 1 msec
2451 				}
2452 			}
2453 		} else {
2454 			if (ddr_ref_hertz == 100000000) {
2455 				debug("N0: DRAM init: requested 100 MHz refclk NOT SUPPORTED\n");
2456 				ddr_ref_hertz = CONFIG_REF_HERTZ;
2457 			}
2458 		}
2459 
2460 		tmp_hertz = measure_octeon_ddr_clock(priv, &ddr_conf[if_idx],
2461 						     cpu_hertz, ddr_hertz,
2462 						     ddr_ref_hertz, if_idx,
2463 						     ddr_conf_valid_mask);
2464 
2465 		/*
2466 		 * only check for alternate refclk acquired on chips that
2467 		 * support it
2468 		 */
2469 		if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
2470 		    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
2471 		    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
2472 			/*
2473 			 * if we are LMC0 and we are asked for 100 MHz refclk,
2474 			 * we must be sure it is available
2475 			 * If not, we print an error message, set to 50MHz,
2476 			 * and go on...
2477 			 */
2478 			if (if_idx == 0 && ddr_ref_hertz == 100000000) {
2479 				/*
2480 				 * Validate that the clock returned is close
2481 				 * enough to the clock desired
2482 				 */
2483 				// FIXME: is 5% close enough?
2484 				int hertz_diff =
2485 				    abs((int)tmp_hertz - (int)ddr_hertz);
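				/*
				 * e.g. ddr_hertz = 800 MHz tolerates up to
				 * 40 MHz of deviation before falling back
				 * to the standard refclk.
				 * (Illustrative value.)
				 */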
2486 				if (hertz_diff > ((int)ddr_hertz * 5 / 100)) {
2487 					// nope, diff is greater than 5%
2488 					debug("N0: DRAM init: requested 100 MHz refclk NOT FOUND\n");
2489 					ddr_ref_hertz = CONFIG_REF_HERTZ;
2490 					// clear the flag before trying again!!
2491 					set_ddr_clock_initialized(priv, 0, 0);
2492 					goto try_again;
2493 				} else {
2494 					debug("N0: DRAM Init: requested 100 MHz refclk FOUND and SELECTED\n");
2495 				}
2496 			}
2497 		}
2498 
2499 		if (tmp_hertz > 0)
2500 			calc_ddr_hertz = tmp_hertz;
2501 		debug("LMC%d: measured speed: %u hz\n", if_idx, tmp_hertz);
2502 	}
2503 
2504 	if (measured_ddr_hertz)
2505 		*measured_ddr_hertz = calc_ddr_hertz;
2506 
2507 	memsize_mbytes = 0;
2508 	for (if_idx = 0; if_idx < 4; ++if_idx) {
2509 		if (!(ddr_conf_valid_mask & (1 << if_idx)))
2510 			continue;
2511 
2512 		ret = init_octeon_dram_interface(priv, &ddr_conf[if_idx],
2513 						 calc_ddr_hertz,
2514 						 cpu_hertz, ddr_ref_hertz,
2515 						 if_idx, ddr_conf_valid_mask);
2516 		if (ret > 0)
2517 			memsize_mbytes += ret;
2518 	}
2519 
2520 	if (memsize_mbytes == 0)
2521 		/* All interfaces failed to initialize, so return error */
2522 		return -1;
2523 
2524 	/*
2525 	 * switch over to DBI mode only for chips that support it, and
2526 	 * enabled by envvar
2527 	 */
2528 	if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
2529 	    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
2530 	    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X))) {
2531 		eptr = env_get("ddr_dbi_switchover");
2532 		if (eptr) {
2533 			printf("DBI Switchover starting...\n");
2534 			cvmx_dbi_switchover(priv);
2535 			printf("DBI Switchover finished.\n");
2536 		}
2537 	}
2538 
2539 	/* call HW-assist tuning here on chips that support it */
2540 	if ((octeon_is_cpuid(OCTEON_CN73XX)) ||
2541 	    (octeon_is_cpuid(OCTEON_CNF75XX)) ||
2542 	    (octeon_is_cpuid(OCTEON_CN78XX_PASS2_X)))
2543 		cvmx_maybe_tune_node(priv, calc_ddr_hertz);
2544 
2545 	eptr = env_get("limit_dram_mbytes");
2546 	if (eptr) {
2547 		unsigned int mbytes = simple_strtoul(eptr, NULL, 10);
2548 
2549 		if (mbytes > 0) {
2550 			memsize_mbytes = mbytes;
2551 			printf("Limiting DRAM size to %d MBytes based on limit_dram_mbytes env. variable\n",
2552 			       mbytes);
2553 		}
2554 	}
2555 
2556 	debug("LMC Initialization complete. Total DRAM %d MB\n",
2557 	      memsize_mbytes);
2558 
2559 	return memsize_mbytes;
2560 }
2561 
2562 static int octeon_ddr_probe(struct udevice *dev)
2563 {
2564 	struct ddr_priv *priv = dev_get_priv(dev);
2565 	struct ofnode_phandle_args l2c_node;
2566 	struct ddr_conf *ddr_conf_ptr;
2567 	u32 ddr_conf_valid_mask = 0;
2568 	u32 measured_ddr_hertz = 0;
2569 	int conf_table_count;
2570 	int def_ddr_freq;
2571 	u32 mem_mbytes = 0;
2572 	u32 ddr_hertz;
2573 	u32 ddr_ref_hertz;
2574 	int alt_refclk;
2575 	const char *eptr;
2576 	fdt_addr_t addr;
2577 	u64 *ptr;
2578 	u64 val;
2579 	int ret;
2580 	int i;
2581 
2582 	/* Don't try to re-init the DDR controller after relocation */
2583 	if (gd->flags & GD_FLG_RELOC)
2584 		return 0;
2585 
2586 	/*
2587 	 * Dummy read all local variables into cache, so that they are
2588 	 * locked in cache when the DDR code runs with flushes etc enabled
2589 	 */
2590 	ptr = (u64 *)_end;
2591 	for (i = 0; i < (0x100000 / sizeof(u64)); i++)
2592 		val = readq(ptr++);
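	/* The loaded values are unused; the reads only warm the cache. */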
2593 
2594 	/*
2595 	 * The base addresses of LMC and L2C are read from the DT. This
2596 	 * makes it possible to use the DDR init code without the need
2597 	 * of the "node" variable, describing on which node to access. The
2598 	 * node number is already included implicitly in the base addresses
2599 	 * read from the DT this way.
2600 	 */
2601 
2602 	/* Get LMC base address */
2603 	priv->lmc_base = dev_remap_addr(dev);
2604 	debug("%s: lmc_base=%p\n", __func__, priv->lmc_base);
2605 
2606 	/* Get L2C base address */
2607 	ret = dev_read_phandle_with_args(dev, "l2c-handle", NULL, 0, 0,
2608 					 &l2c_node);
2609 	if (ret) {
2610 		printf("Can't access L2C node!\n");
2611 		return -ENODEV;
2612 	}
2613 
2614 	addr = ofnode_get_addr(l2c_node.node);
2615 	if (addr == FDT_ADDR_T_NONE) {
2616 		printf("Can't access L2C node!\n");
2617 		return -ENODEV;
2618 	}
2619 
2620 	priv->l2c_base = map_physmem(addr, 0, MAP_NOCACHE);
2621 	debug("%s: l2c_base=%p\n", __func__, priv->l2c_base);
2622 
2623 	ddr_conf_ptr = octeon_ddr_conf_table_get(&conf_table_count,
2624 						 &def_ddr_freq);
2625 	if (!ddr_conf_ptr) {
2626 		printf("ERROR: unable to determine DDR configuration\n");
2627 		return -ENODEV;
2628 	}
2629 
2630 	for (i = 0; i < conf_table_count; i++) {
2631 		if (ddr_conf_ptr[i].dimm_config_table[0].spd_addrs[0] ||
2632 		    ddr_conf_ptr[i].dimm_config_table[0].spd_ptrs[0])
2633 			ddr_conf_valid_mask |= 1 << i;
2634 	}
2635 
2636 	/*
2637 	 * Check for special case of mismarked 3005 samples,
2638 	 * and adjust cpuid
2639 	 */
2640 	alt_refclk = 0;
2641 	ddr_hertz = def_ddr_freq * 1000000;
2642 
2643 	eptr = env_get("ddr_clock_hertz");
2644 	if (eptr) {
2645 		ddr_hertz = simple_strtoul(eptr, NULL, 0);
2646 		gd->mem_clk = divide_nint(ddr_hertz, 1000000);
2647 		printf("Parameter found in environment. ddr_clock_hertz = %d\n",
2648 		       ddr_hertz);
2649 	}
2650 
2651 	ddr_ref_hertz = octeon3_refclock(alt_refclk,
2652 					 ddr_hertz,
2653 					 &ddr_conf_ptr[0].dimm_config_table[0]);
2654 
2655 	debug("Initializing DDR, clock = %uhz, reference = %uhz\n",
2656 	      ddr_hertz, ddr_ref_hertz);
2657 
2658 	mem_mbytes = octeon_ddr_initialize(priv, gd->cpu_clk,
2659 					   ddr_hertz, ddr_ref_hertz,
2660 					   ddr_conf_valid_mask,
2661 					   ddr_conf_ptr, &measured_ddr_hertz);
2662 	debug("Mem size in MBYTES: %u\n", mem_mbytes);
2663 
2664 	gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);
2665 
2666 	debug("Measured DDR clock %d Hz\n", measured_ddr_hertz);
2667 
2668 	if (measured_ddr_hertz != 0) {
2669 		if (!gd->mem_clk) {
2670 			/*
2671 			 * If ddr_clock not set, use measured clock
2672 			 * and don't warn
2673 			 */
2674 			gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);
2675 		} else if ((measured_ddr_hertz > ddr_hertz + 3000000) ||
2676 			   (measured_ddr_hertz < ddr_hertz - 3000000)) {
2677 			printf("\nWARNING:\n");
2678 			printf("WARNING: Measured DDR clock mismatch!  expected: %lld MHz, measured: %lld MHz, cpu clock: %lld MHz\n",
2679 			       divide_nint(ddr_hertz, 1000000),
2680 			       divide_nint(measured_ddr_hertz, 1000000),
2681 			       divide_nint(gd->cpu_clk, 1000000));
2682 			printf("WARNING:\n\n");
2683 			gd->mem_clk = divide_nint(measured_ddr_hertz, 1000000);
2684 		}
2685 	}
2686 
2687 	if (!mem_mbytes)
2688 		return -ENODEV;
2689 
2690 	priv->info.base = CONFIG_SYS_SDRAM_BASE;
2691 	priv->info.size = MB(mem_mbytes);
2692 
2693 	/*
2694 	 * For 6XXX generate a proper error when reading/writing
2695 	 * non-existent memory locations.
2696 	 */
2697 	cvmx_l2c_set_big_size(priv, mem_mbytes, 0);
2698 
2699 	debug("Ram size %uMiB\n", mem_mbytes);
2700 
2701 	return 0;
2702 }
2703 
2704 static int octeon_get_info(struct udevice *dev, struct ram_info *info)
2705 {
2706 	struct ddr_priv *priv = dev_get_priv(dev);
2707 
2708 	*info = priv->info;
2709 
2710 	return 0;
2711 }
2712 
2713 static struct ram_ops octeon_ops = {
2714 	.get_info = octeon_get_info,
2715 };
2716 
2717 static const struct udevice_id octeon_ids[] = {
2718 	{.compatible = "cavium,octeon-7xxx-ddr4" },
2719 	{ }
2720 };
2721 
2722 U_BOOT_DRIVER(octeon_ddr) = {
2723 	.name = "octeon_ddr",
2724 	.id = UCLASS_RAM,
2725 	.of_match = octeon_ids,
2726 	.ops = &octeon_ops,
2727 	.probe = octeon_ddr_probe,
2728 	.plat_auto = sizeof(struct ddr_priv),
2729 };
2730