1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2008, 2010-2016 Freescale Semiconductor, Inc.
4  * Copyright 2017-2018 NXP Semiconductor
5  */
6 
7 #include <common.h>
8 #include <env.h>
9 #include <hwconfig.h>
10 #include <fsl_ddr_sdram.h>
11 #include <log.h>
12 
13 #include <fsl_ddr.h>
14 #if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
15 	defined(CONFIG_ARM)
16 #include <asm/arch/clock.h>
17 #endif
18 
/*
 * Use our own stack-based buffer before relocation to allow access to longer
 * hwconfig strings that might be in the environment before we've relocated.
 * This is fragile both in its use of stack space and in whether the buffer is
 * big enough, although env_get_f() will at least warn us about the latter.
 */
25 
26 /* Board-specific functions defined in each board's ddr.c */
void __weak fsl_ddr_board_options(memctl_options_t *popts,
				  dimm_params_t *pdimm,
				  unsigned int ctrl_num)
{
	return;
}
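
/*
 * A minimal sketch of a board-side override (illustrative only; which fields
 * a given board touches, and their values, are assumptions, not settings
 * taken from this file):
 *
 *	void fsl_ddr_board_options(memctl_options_t *popts,
 *				   dimm_params_t *pdimm,
 *				   unsigned int ctrl_num)
 *	{
 *		popts->clk_adjust = 5;
 *		popts->cpo_override = 0x1f;
 *		popts->write_data_delay = 3;
 *	}
 */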
33 
34 struct dynamic_odt {
35 	unsigned int odt_rd_cfg;
36 	unsigned int odt_wr_cfg;
37 	unsigned int odt_rtt_norm;
38 	unsigned int odt_rtt_wr;
39 };
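
/*
 * The tables below give per-chip-select ODT settings for each supported
 * DIMM population.  Naming: single_S/D/Q is one slot holding a single-,
 * dual- or quad-rank DIMM; dual_XY covers two slots, where X and Y describe
 * slot 0 and slot 1 respectively (D = dual-rank, S = single-rank,
 * 0 = empty slot).
 */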
40 
41 #ifdef CONFIG_SYS_FSL_DDR4
/*
 * Quad-rank is not verified yet due to limited availability.
 * 20 ohm is replaced with 34 ohm since DDR4 has no 20 ohm option.
 */
45 static __maybe_unused const struct dynamic_odt single_Q[4] = {
46 	{	/* cs0 */
47 		FSL_DDR_ODT_NEVER,
48 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
49 		DDR4_RTT_34_OHM,	/* unverified */
50 		DDR4_RTT_120_OHM
51 	},
52 	{	/* cs1 */
53 		FSL_DDR_ODT_NEVER,
54 		FSL_DDR_ODT_NEVER,
55 		DDR4_RTT_OFF,
56 		DDR4_RTT_120_OHM
57 	},
58 	{	/* cs2 */
59 		FSL_DDR_ODT_NEVER,
60 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
61 		DDR4_RTT_34_OHM,
62 		DDR4_RTT_120_OHM
63 	},
64 	{	/* cs3 */
65 		FSL_DDR_ODT_NEVER,
66 		FSL_DDR_ODT_NEVER,	/* tied high */
67 		DDR4_RTT_OFF,
68 		DDR4_RTT_120_OHM
69 	}
70 };
71 
72 static __maybe_unused const struct dynamic_odt single_D[4] = {
73 	{	/* cs0 */
74 		FSL_DDR_ODT_NEVER,
75 		FSL_DDR_ODT_ALL,
76 		DDR4_RTT_40_OHM,
77 		DDR4_RTT_OFF
78 	},
79 	{	/* cs1 */
80 		FSL_DDR_ODT_NEVER,
81 		FSL_DDR_ODT_NEVER,
82 		DDR4_RTT_OFF,
83 		DDR4_RTT_OFF
84 	},
85 	{0, 0, 0, 0},
86 	{0, 0, 0, 0}
87 };
88 
89 static __maybe_unused const struct dynamic_odt single_S[4] = {
90 	{	/* cs0 */
91 		FSL_DDR_ODT_NEVER,
92 		FSL_DDR_ODT_ALL,
93 		DDR4_RTT_40_OHM,
94 		DDR4_RTT_OFF
95 	},
96 	{0, 0, 0, 0},
97 	{0, 0, 0, 0},
98 	{0, 0, 0, 0},
99 };
100 
101 static __maybe_unused const struct dynamic_odt dual_DD[4] = {
102 	{	/* cs0 */
103 		FSL_DDR_ODT_NEVER,
104 		FSL_DDR_ODT_SAME_DIMM,
105 		DDR4_RTT_120_OHM,
106 		DDR4_RTT_OFF
107 	},
108 	{	/* cs1 */
109 		FSL_DDR_ODT_OTHER_DIMM,
110 		FSL_DDR_ODT_OTHER_DIMM,
111 		DDR4_RTT_34_OHM,
112 		DDR4_RTT_OFF
113 	},
114 	{	/* cs2 */
115 		FSL_DDR_ODT_NEVER,
116 		FSL_DDR_ODT_SAME_DIMM,
117 		DDR4_RTT_120_OHM,
118 		DDR4_RTT_OFF
119 	},
120 	{	/* cs3 */
121 		FSL_DDR_ODT_OTHER_DIMM,
122 		FSL_DDR_ODT_OTHER_DIMM,
123 		DDR4_RTT_34_OHM,
124 		DDR4_RTT_OFF
125 	}
126 };
127 
128 static __maybe_unused const struct dynamic_odt dual_DS[4] = {
129 	{	/* cs0 */
130 		FSL_DDR_ODT_NEVER,
131 		FSL_DDR_ODT_SAME_DIMM,
132 		DDR4_RTT_120_OHM,
133 		DDR4_RTT_OFF
134 	},
135 	{	/* cs1 */
136 		FSL_DDR_ODT_OTHER_DIMM,
137 		FSL_DDR_ODT_OTHER_DIMM,
138 		DDR4_RTT_34_OHM,
139 		DDR4_RTT_OFF
140 	},
141 	{	/* cs2 */
142 		FSL_DDR_ODT_OTHER_DIMM,
143 		FSL_DDR_ODT_ALL,
144 		DDR4_RTT_34_OHM,
145 		DDR4_RTT_120_OHM
146 	},
147 	{0, 0, 0, 0}
148 };
149 static __maybe_unused const struct dynamic_odt dual_SD[4] = {
150 	{	/* cs0 */
151 		FSL_DDR_ODT_OTHER_DIMM,
152 		FSL_DDR_ODT_ALL,
153 		DDR4_RTT_34_OHM,
154 		DDR4_RTT_120_OHM
155 	},
156 	{0, 0, 0, 0},
157 	{	/* cs2 */
158 		FSL_DDR_ODT_NEVER,
159 		FSL_DDR_ODT_SAME_DIMM,
160 		DDR4_RTT_120_OHM,
161 		DDR4_RTT_OFF
162 	},
163 	{	/* cs3 */
164 		FSL_DDR_ODT_OTHER_DIMM,
165 		FSL_DDR_ODT_OTHER_DIMM,
166 		DDR4_RTT_34_OHM,
167 		DDR4_RTT_OFF
168 	}
169 };
170 
171 static __maybe_unused const struct dynamic_odt dual_SS[4] = {
172 	{	/* cs0 */
173 		FSL_DDR_ODT_OTHER_DIMM,
174 		FSL_DDR_ODT_ALL,
175 		DDR4_RTT_34_OHM,
176 		DDR4_RTT_120_OHM
177 	},
178 	{0, 0, 0, 0},
179 	{	/* cs2 */
180 		FSL_DDR_ODT_OTHER_DIMM,
181 		FSL_DDR_ODT_ALL,
182 		DDR4_RTT_34_OHM,
183 		DDR4_RTT_120_OHM
184 	},
185 	{0, 0, 0, 0}
186 };
187 
188 static __maybe_unused const struct dynamic_odt dual_D0[4] = {
189 	{	/* cs0 */
190 		FSL_DDR_ODT_NEVER,
191 		FSL_DDR_ODT_SAME_DIMM,
192 		DDR4_RTT_40_OHM,
193 		DDR4_RTT_OFF
194 	},
195 	{	/* cs1 */
196 		FSL_DDR_ODT_NEVER,
197 		FSL_DDR_ODT_NEVER,
198 		DDR4_RTT_OFF,
199 		DDR4_RTT_OFF
200 	},
201 	{0, 0, 0, 0},
202 	{0, 0, 0, 0}
203 };
204 
205 static __maybe_unused const struct dynamic_odt dual_0D[4] = {
206 	{0, 0, 0, 0},
207 	{0, 0, 0, 0},
208 	{	/* cs2 */
209 		FSL_DDR_ODT_NEVER,
210 		FSL_DDR_ODT_SAME_DIMM,
211 		DDR4_RTT_40_OHM,
212 		DDR4_RTT_OFF
213 	},
214 	{	/* cs3 */
215 		FSL_DDR_ODT_NEVER,
216 		FSL_DDR_ODT_NEVER,
217 		DDR4_RTT_OFF,
218 		DDR4_RTT_OFF
219 	}
220 };
221 
222 static __maybe_unused const struct dynamic_odt dual_S0[4] = {
223 	{	/* cs0 */
224 		FSL_DDR_ODT_NEVER,
225 		FSL_DDR_ODT_CS,
226 		DDR4_RTT_40_OHM,
227 		DDR4_RTT_OFF
228 	},
229 	{0, 0, 0, 0},
230 	{0, 0, 0, 0},
231 	{0, 0, 0, 0}
232 
233 };
234 
235 static __maybe_unused const struct dynamic_odt dual_0S[4] = {
236 	{0, 0, 0, 0},
237 	{0, 0, 0, 0},
238 	{	/* cs2 */
239 		FSL_DDR_ODT_NEVER,
240 		FSL_DDR_ODT_CS,
241 		DDR4_RTT_40_OHM,
242 		DDR4_RTT_OFF
243 	},
244 	{0, 0, 0, 0}
245 
246 };
247 
248 static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
249 	{	/* cs0 */
250 		FSL_DDR_ODT_NEVER,
251 		FSL_DDR_ODT_CS,
252 		DDR4_RTT_120_OHM,
253 		DDR4_RTT_OFF
254 	},
255 	{	/* cs1 */
256 		FSL_DDR_ODT_NEVER,
257 		FSL_DDR_ODT_CS,
258 		DDR4_RTT_120_OHM,
259 		DDR4_RTT_OFF
260 	},
261 	{	/* cs2 */
262 		FSL_DDR_ODT_NEVER,
263 		FSL_DDR_ODT_CS,
264 		DDR4_RTT_120_OHM,
265 		DDR4_RTT_OFF
266 	},
267 	{	/* cs3 */
268 		FSL_DDR_ODT_NEVER,
269 		FSL_DDR_ODT_CS,
270 		DDR4_RTT_120_OHM,
271 		DDR4_RTT_OFF
272 	}
273 };
274 #elif defined(CONFIG_SYS_FSL_DDR3)
275 static __maybe_unused const struct dynamic_odt single_Q[4] = {
276 	{	/* cs0 */
277 		FSL_DDR_ODT_NEVER,
278 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
279 		DDR3_RTT_20_OHM,
280 		DDR3_RTT_120_OHM
281 	},
282 	{	/* cs1 */
283 		FSL_DDR_ODT_NEVER,
284 		FSL_DDR_ODT_NEVER,	/* tied high */
285 		DDR3_RTT_OFF,
286 		DDR3_RTT_120_OHM
287 	},
288 	{	/* cs2 */
289 		FSL_DDR_ODT_NEVER,
290 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
291 		DDR3_RTT_20_OHM,
292 		DDR3_RTT_120_OHM
293 	},
294 	{	/* cs3 */
295 		FSL_DDR_ODT_NEVER,
296 		FSL_DDR_ODT_NEVER,	/* tied high */
297 		DDR3_RTT_OFF,
298 		DDR3_RTT_120_OHM
299 	}
300 };
301 
302 static __maybe_unused const struct dynamic_odt single_D[4] = {
303 	{	/* cs0 */
304 		FSL_DDR_ODT_NEVER,
305 		FSL_DDR_ODT_ALL,
306 		DDR3_RTT_40_OHM,
307 		DDR3_RTT_OFF
308 	},
309 	{	/* cs1 */
310 		FSL_DDR_ODT_NEVER,
311 		FSL_DDR_ODT_NEVER,
312 		DDR3_RTT_OFF,
313 		DDR3_RTT_OFF
314 	},
315 	{0, 0, 0, 0},
316 	{0, 0, 0, 0}
317 };
318 
319 static __maybe_unused const struct dynamic_odt single_S[4] = {
320 	{	/* cs0 */
321 		FSL_DDR_ODT_NEVER,
322 		FSL_DDR_ODT_ALL,
323 		DDR3_RTT_40_OHM,
324 		DDR3_RTT_OFF
325 	},
326 	{0, 0, 0, 0},
327 	{0, 0, 0, 0},
328 	{0, 0, 0, 0},
329 };
330 
331 static __maybe_unused const struct dynamic_odt dual_DD[4] = {
332 	{	/* cs0 */
333 		FSL_DDR_ODT_NEVER,
334 		FSL_DDR_ODT_SAME_DIMM,
335 		DDR3_RTT_120_OHM,
336 		DDR3_RTT_OFF
337 	},
338 	{	/* cs1 */
339 		FSL_DDR_ODT_OTHER_DIMM,
340 		FSL_DDR_ODT_OTHER_DIMM,
341 		DDR3_RTT_30_OHM,
342 		DDR3_RTT_OFF
343 	},
344 	{	/* cs2 */
345 		FSL_DDR_ODT_NEVER,
346 		FSL_DDR_ODT_SAME_DIMM,
347 		DDR3_RTT_120_OHM,
348 		DDR3_RTT_OFF
349 	},
350 	{	/* cs3 */
351 		FSL_DDR_ODT_OTHER_DIMM,
352 		FSL_DDR_ODT_OTHER_DIMM,
353 		DDR3_RTT_30_OHM,
354 		DDR3_RTT_OFF
355 	}
356 };
357 
358 static __maybe_unused const struct dynamic_odt dual_DS[4] = {
359 	{	/* cs0 */
360 		FSL_DDR_ODT_NEVER,
361 		FSL_DDR_ODT_SAME_DIMM,
362 		DDR3_RTT_120_OHM,
363 		DDR3_RTT_OFF
364 	},
365 	{	/* cs1 */
366 		FSL_DDR_ODT_OTHER_DIMM,
367 		FSL_DDR_ODT_OTHER_DIMM,
368 		DDR3_RTT_30_OHM,
369 		DDR3_RTT_OFF
370 	},
371 	{	/* cs2 */
372 		FSL_DDR_ODT_OTHER_DIMM,
373 		FSL_DDR_ODT_ALL,
374 		DDR3_RTT_20_OHM,
375 		DDR3_RTT_120_OHM
376 	},
377 	{0, 0, 0, 0}
378 };
379 static __maybe_unused const struct dynamic_odt dual_SD[4] = {
380 	{	/* cs0 */
381 		FSL_DDR_ODT_OTHER_DIMM,
382 		FSL_DDR_ODT_ALL,
383 		DDR3_RTT_20_OHM,
384 		DDR3_RTT_120_OHM
385 	},
386 	{0, 0, 0, 0},
387 	{	/* cs2 */
388 		FSL_DDR_ODT_NEVER,
389 		FSL_DDR_ODT_SAME_DIMM,
390 		DDR3_RTT_120_OHM,
391 		DDR3_RTT_OFF
392 	},
393 	{	/* cs3 */
394 		FSL_DDR_ODT_OTHER_DIMM,
395 		FSL_DDR_ODT_OTHER_DIMM,
396 		DDR3_RTT_20_OHM,
397 		DDR3_RTT_OFF
398 	}
399 };
400 
401 static __maybe_unused const struct dynamic_odt dual_SS[4] = {
402 	{	/* cs0 */
403 		FSL_DDR_ODT_OTHER_DIMM,
404 		FSL_DDR_ODT_ALL,
405 		DDR3_RTT_30_OHM,
406 		DDR3_RTT_120_OHM
407 	},
408 	{0, 0, 0, 0},
409 	{	/* cs2 */
410 		FSL_DDR_ODT_OTHER_DIMM,
411 		FSL_DDR_ODT_ALL,
412 		DDR3_RTT_30_OHM,
413 		DDR3_RTT_120_OHM
414 	},
415 	{0, 0, 0, 0}
416 };
417 
418 static __maybe_unused const struct dynamic_odt dual_D0[4] = {
419 	{	/* cs0 */
420 		FSL_DDR_ODT_NEVER,
421 		FSL_DDR_ODT_SAME_DIMM,
422 		DDR3_RTT_40_OHM,
423 		DDR3_RTT_OFF
424 	},
425 	{	/* cs1 */
426 		FSL_DDR_ODT_NEVER,
427 		FSL_DDR_ODT_NEVER,
428 		DDR3_RTT_OFF,
429 		DDR3_RTT_OFF
430 	},
431 	{0, 0, 0, 0},
432 	{0, 0, 0, 0}
433 };
434 
435 static __maybe_unused const struct dynamic_odt dual_0D[4] = {
436 	{0, 0, 0, 0},
437 	{0, 0, 0, 0},
438 	{	/* cs2 */
439 		FSL_DDR_ODT_NEVER,
440 		FSL_DDR_ODT_SAME_DIMM,
441 		DDR3_RTT_40_OHM,
442 		DDR3_RTT_OFF
443 	},
444 	{	/* cs3 */
445 		FSL_DDR_ODT_NEVER,
446 		FSL_DDR_ODT_NEVER,
447 		DDR3_RTT_OFF,
448 		DDR3_RTT_OFF
449 	}
450 };
451 
452 static __maybe_unused const struct dynamic_odt dual_S0[4] = {
453 	{	/* cs0 */
454 		FSL_DDR_ODT_NEVER,
455 		FSL_DDR_ODT_CS,
456 		DDR3_RTT_40_OHM,
457 		DDR3_RTT_OFF
458 	},
459 	{0, 0, 0, 0},
460 	{0, 0, 0, 0},
461 	{0, 0, 0, 0}
462 
463 };
464 
465 static __maybe_unused const struct dynamic_odt dual_0S[4] = {
466 	{0, 0, 0, 0},
467 	{0, 0, 0, 0},
468 	{	/* cs2 */
469 		FSL_DDR_ODT_NEVER,
470 		FSL_DDR_ODT_CS,
471 		DDR3_RTT_40_OHM,
472 		DDR3_RTT_OFF
473 	},
474 	{0, 0, 0, 0}
475 
476 };
477 
478 static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
479 	{	/* cs0 */
480 		FSL_DDR_ODT_NEVER,
481 		FSL_DDR_ODT_CS,
482 		DDR3_RTT_120_OHM,
483 		DDR3_RTT_OFF
484 	},
485 	{	/* cs1 */
486 		FSL_DDR_ODT_NEVER,
487 		FSL_DDR_ODT_CS,
488 		DDR3_RTT_120_OHM,
489 		DDR3_RTT_OFF
490 	},
491 	{	/* cs2 */
492 		FSL_DDR_ODT_NEVER,
493 		FSL_DDR_ODT_CS,
494 		DDR3_RTT_120_OHM,
495 		DDR3_RTT_OFF
496 	},
497 	{	/* cs3 */
498 		FSL_DDR_ODT_NEVER,
499 		FSL_DDR_ODT_CS,
500 		DDR3_RTT_120_OHM,
501 		DDR3_RTT_OFF
502 	}
503 };
504 #else	/* CONFIG_SYS_FSL_DDR3 */
505 static __maybe_unused const struct dynamic_odt single_Q[4] = {
506 	{0, 0, 0, 0},
507 	{0, 0, 0, 0},
508 	{0, 0, 0, 0},
509 	{0, 0, 0, 0}
510 };
511 
512 static __maybe_unused const struct dynamic_odt single_D[4] = {
513 	{	/* cs0 */
514 		FSL_DDR_ODT_NEVER,
515 		FSL_DDR_ODT_ALL,
516 		DDR2_RTT_150_OHM,
517 		DDR2_RTT_OFF
518 	},
519 	{	/* cs1 */
520 		FSL_DDR_ODT_NEVER,
521 		FSL_DDR_ODT_NEVER,
522 		DDR2_RTT_OFF,
523 		DDR2_RTT_OFF
524 	},
525 	{0, 0, 0, 0},
526 	{0, 0, 0, 0}
527 };
528 
529 static __maybe_unused const struct dynamic_odt single_S[4] = {
530 	{	/* cs0 */
531 		FSL_DDR_ODT_NEVER,
532 		FSL_DDR_ODT_ALL,
533 		DDR2_RTT_150_OHM,
534 		DDR2_RTT_OFF
535 	},
536 	{0, 0, 0, 0},
537 	{0, 0, 0, 0},
538 	{0, 0, 0, 0},
539 };
540 
541 static __maybe_unused const struct dynamic_odt dual_DD[4] = {
542 	{	/* cs0 */
543 		FSL_DDR_ODT_OTHER_DIMM,
544 		FSL_DDR_ODT_OTHER_DIMM,
545 		DDR2_RTT_75_OHM,
546 		DDR2_RTT_OFF
547 	},
548 	{	/* cs1 */
549 		FSL_DDR_ODT_NEVER,
550 		FSL_DDR_ODT_NEVER,
551 		DDR2_RTT_OFF,
552 		DDR2_RTT_OFF
553 	},
554 	{	/* cs2 */
555 		FSL_DDR_ODT_OTHER_DIMM,
556 		FSL_DDR_ODT_OTHER_DIMM,
557 		DDR2_RTT_75_OHM,
558 		DDR2_RTT_OFF
559 	},
560 	{	/* cs3 */
561 		FSL_DDR_ODT_NEVER,
562 		FSL_DDR_ODT_NEVER,
563 		DDR2_RTT_OFF,
564 		DDR2_RTT_OFF
565 	}
566 };
567 
568 static __maybe_unused const struct dynamic_odt dual_DS[4] = {
569 	{	/* cs0 */
570 		FSL_DDR_ODT_OTHER_DIMM,
571 		FSL_DDR_ODT_OTHER_DIMM,
572 		DDR2_RTT_75_OHM,
573 		DDR2_RTT_OFF
574 	},
575 	{	/* cs1 */
576 		FSL_DDR_ODT_NEVER,
577 		FSL_DDR_ODT_NEVER,
578 		DDR2_RTT_OFF,
579 		DDR2_RTT_OFF
580 	},
581 	{	/* cs2 */
582 		FSL_DDR_ODT_OTHER_DIMM,
583 		FSL_DDR_ODT_OTHER_DIMM,
584 		DDR2_RTT_75_OHM,
585 		DDR2_RTT_OFF
586 	},
587 	{0, 0, 0, 0}
588 };
589 
590 static __maybe_unused const struct dynamic_odt dual_SD[4] = {
591 	{	/* cs0 */
592 		FSL_DDR_ODT_OTHER_DIMM,
593 		FSL_DDR_ODT_OTHER_DIMM,
594 		DDR2_RTT_75_OHM,
595 		DDR2_RTT_OFF
596 	},
597 	{0, 0, 0, 0},
598 	{	/* cs2 */
599 		FSL_DDR_ODT_OTHER_DIMM,
600 		FSL_DDR_ODT_OTHER_DIMM,
601 		DDR2_RTT_75_OHM,
602 		DDR2_RTT_OFF
603 	},
604 	{	/* cs3 */
605 		FSL_DDR_ODT_NEVER,
606 		FSL_DDR_ODT_NEVER,
607 		DDR2_RTT_OFF,
608 		DDR2_RTT_OFF
609 	}
610 };
611 
612 static __maybe_unused const struct dynamic_odt dual_SS[4] = {
613 	{	/* cs0 */
614 		FSL_DDR_ODT_OTHER_DIMM,
615 		FSL_DDR_ODT_OTHER_DIMM,
616 		DDR2_RTT_75_OHM,
617 		DDR2_RTT_OFF
618 	},
619 	{0, 0, 0, 0},
620 	{	/* cs2 */
621 		FSL_DDR_ODT_OTHER_DIMM,
622 		FSL_DDR_ODT_OTHER_DIMM,
623 		DDR2_RTT_75_OHM,
624 		DDR2_RTT_OFF
625 	},
626 	{0, 0, 0, 0}
627 };
628 
629 static __maybe_unused const struct dynamic_odt dual_D0[4] = {
630 	{	/* cs0 */
631 		FSL_DDR_ODT_NEVER,
632 		FSL_DDR_ODT_ALL,
633 		DDR2_RTT_150_OHM,
634 		DDR2_RTT_OFF
635 	},
636 	{	/* cs1 */
637 		FSL_DDR_ODT_NEVER,
638 		FSL_DDR_ODT_NEVER,
639 		DDR2_RTT_OFF,
640 		DDR2_RTT_OFF
641 	},
642 	{0, 0, 0, 0},
643 	{0, 0, 0, 0}
644 };
645 
646 static __maybe_unused const struct dynamic_odt dual_0D[4] = {
647 	{0, 0, 0, 0},
648 	{0, 0, 0, 0},
649 	{	/* cs2 */
650 		FSL_DDR_ODT_NEVER,
651 		FSL_DDR_ODT_ALL,
652 		DDR2_RTT_150_OHM,
653 		DDR2_RTT_OFF
654 	},
655 	{	/* cs3 */
656 		FSL_DDR_ODT_NEVER,
657 		FSL_DDR_ODT_NEVER,
658 		DDR2_RTT_OFF,
659 		DDR2_RTT_OFF
660 	}
661 };
662 
663 static __maybe_unused const struct dynamic_odt dual_S0[4] = {
664 	{	/* cs0 */
665 		FSL_DDR_ODT_NEVER,
666 		FSL_DDR_ODT_CS,
667 		DDR2_RTT_150_OHM,
668 		DDR2_RTT_OFF
669 	},
670 	{0, 0, 0, 0},
671 	{0, 0, 0, 0},
672 	{0, 0, 0, 0}
673 
674 };
675 
676 static __maybe_unused const struct dynamic_odt dual_0S[4] = {
677 	{0, 0, 0, 0},
678 	{0, 0, 0, 0},
679 	{	/* cs2 */
680 		FSL_DDR_ODT_NEVER,
681 		FSL_DDR_ODT_CS,
682 		DDR2_RTT_150_OHM,
683 		DDR2_RTT_OFF
684 	},
685 	{0, 0, 0, 0}
686 
687 };
688 
689 static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
690 	{	/* cs0 */
691 		FSL_DDR_ODT_NEVER,
692 		FSL_DDR_ODT_CS,
693 		DDR2_RTT_75_OHM,
694 		DDR2_RTT_OFF
695 	},
696 	{	/* cs1 */
697 		FSL_DDR_ODT_NEVER,
698 		FSL_DDR_ODT_NEVER,
699 		DDR2_RTT_OFF,
700 		DDR2_RTT_OFF
701 	},
702 	{	/* cs2 */
703 		FSL_DDR_ODT_NEVER,
704 		FSL_DDR_ODT_CS,
705 		DDR2_RTT_75_OHM,
706 		DDR2_RTT_OFF
707 	},
708 	{	/* cs3 */
709 		FSL_DDR_ODT_NEVER,
710 		FSL_DDR_ODT_NEVER,
711 		DDR2_RTT_OFF,
712 		DDR2_RTT_OFF
713 	}
714 };
715 #endif
716 
/*
 * Automatically select the bank interleaving mode based on the DIMMs
 * present, in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
 * This function only deals with one or two slots per controller.
 */
static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
723 {
724 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
725 	if (pdimm[0].n_ranks == 4)
726 		return FSL_DDR_CS0_CS1_CS2_CS3;
727 	else if (pdimm[0].n_ranks == 2)
728 		return FSL_DDR_CS0_CS1;
729 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
730 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
731 	if (pdimm[0].n_ranks == 4)
732 		return FSL_DDR_CS0_CS1_CS2_CS3;
733 #endif
734 	if (pdimm[0].n_ranks == 2) {
735 		if (pdimm[1].n_ranks == 2)
736 			return FSL_DDR_CS0_CS1_CS2_CS3;
737 		else
738 			return FSL_DDR_CS0_CS1;
739 	}
740 #endif
741 	return 0;
742 }
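
/*
 * Example (two slots per controller): two dual-rank DIMMs yield
 * FSL_DDR_CS0_CS1_CS2_CS3, one dual-rank DIMM in slot 0 with slot 1 empty or
 * single-rank yields FSL_DDR_CS0_CS1, and other populations fall back to 0
 * (no bank interleaving), except a quad-rank DIMM in slot 0 on
 * quad-capable boards.
 */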
743 
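/*
 * Populate *popts with default controller options derived from the DIMM
 * parameters, the merged (common) timing parameters and any "fsl_ddr"
 * hwconfig overrides from the environment, then give the board code a
 * chance to adjust them via fsl_ddr_board_options().  Always returns 0.
 */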
unsigned int populate_memctl_options(const common_timing_params_t *common_dimm,
			memctl_options_t *popts,
			dimm_params_t *pdimm,
			unsigned int ctrl_num)
748 {
749 	unsigned int i;
750 	char buf[HWCONFIG_BUFFER_SIZE];
751 #if defined(CONFIG_SYS_FSL_DDR3) || \
752 	defined(CONFIG_SYS_FSL_DDR2) || \
753 	defined(CONFIG_SYS_FSL_DDR4)
754 	const struct dynamic_odt *pdodt = odt_unknown;
755 #endif
756 #if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
757 	ulong ddr_freq;
758 #endif
759 
	/*
	 * Extract hwconfig from the environment, since the environment has
	 * not been fully set up yet but we need it for the DDR config
	 * parameters.
	 */
764 	if (env_get_f("hwconfig", buf, sizeof(buf)) < 0)
765 		buf[0] = '\0';
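
	/*
	 * Illustrative environment setting (syntax as described in
	 * doc/README.fsl-ddr; the values here are examples, not defaults):
	 *
	 *	setenv hwconfig "fsl_ddr:ecc=on,addr_hash=true"
	 *
	 * would enable ECC and address hashing via the parsing below.
	 */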
766 
767 #if defined(CONFIG_SYS_FSL_DDR3) || \
768 	defined(CONFIG_SYS_FSL_DDR2) || \
769 	defined(CONFIG_SYS_FSL_DDR4)
770 	/* Chip select options. */
771 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
772 	switch (pdimm[0].n_ranks) {
773 	case 1:
774 		pdodt = single_S;
775 		break;
776 	case 2:
777 		pdodt = single_D;
778 		break;
779 	case 4:
780 		pdodt = single_Q;
781 		break;
782 	}
783 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
784 	switch (pdimm[0].n_ranks) {
785 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
786 	case 4:
787 		pdodt = single_Q;
788 		if (pdimm[1].n_ranks)
789 			printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
790 		break;
791 #endif
792 	case 2:
793 		switch (pdimm[1].n_ranks) {
794 		case 2:
795 			pdodt = dual_DD;
796 			break;
797 		case 1:
798 			pdodt = dual_DS;
799 			break;
800 		case 0:
801 			pdodt = dual_D0;
802 			break;
803 		}
804 		break;
805 	case 1:
806 		switch (pdimm[1].n_ranks) {
807 		case 2:
808 			pdodt = dual_SD;
809 			break;
810 		case 1:
811 			pdodt = dual_SS;
812 			break;
813 		case 0:
814 			pdodt = dual_S0;
815 			break;
816 		}
817 		break;
818 	case 0:
819 		switch (pdimm[1].n_ranks) {
820 		case 2:
821 			pdodt = dual_0D;
822 			break;
823 		case 1:
824 			pdodt = dual_0S;
825 			break;
826 		}
827 		break;
828 	}
829 #endif	/* CONFIG_DIMM_SLOTS_PER_CTLR */
830 #endif	/* CONFIG_SYS_FSL_DDR2, 3, 4 */
831 
832 	/* Pick chip-select local options. */
833 	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
834 #if defined(CONFIG_SYS_FSL_DDR3) || \
835 	defined(CONFIG_SYS_FSL_DDR2) || \
836 	defined(CONFIG_SYS_FSL_DDR4)
837 		popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
838 		popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
839 		popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
840 		popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
841 #else
842 		popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
843 		popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
844 #endif
845 		popts->cs_local_opts[i].auto_precharge = 0;
846 	}
847 
848 	/* Pick interleaving mode. */
849 
850 	/*
851 	 * 0 = no interleaving
852 	 * 1 = interleaving between 2 controllers
853 	 */
854 	popts->memctl_interleaving = 0;
855 
856 	/*
857 	 * 0 = cacheline
858 	 * 1 = page
859 	 * 2 = (logical) bank
860 	 * 3 = superbank (only if CS interleaving is enabled)
861 	 */
862 	popts->memctl_interleaving_mode = 0;
863 
864 	/*
865 	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
866 	 * 1: page:      bit to the left of the column bits selects the memctl
867 	 * 2: bank:      bit to the left of the bank bits selects the memctl
868 	 * 3: superbank: bit to the left of the chip select selects the memctl
869 	 *
870 	 * NOTE: ba_intlv (rank interleaving) is independent of memory
871 	 * controller interleaving; it is only within a memory controller.
872 	 * Must use superbank interleaving if rank interleaving is used and
873 	 * memory controller interleaving is enabled.
874 	 */
875 
876 	/*
877 	 * 0 = no
878 	 * 0x40 = CS0,CS1
879 	 * 0x20 = CS2,CS3
880 	 * 0x60 = CS0,CS1 + CS2,CS3
881 	 * 0x04 = CS0,CS1,CS2,CS3
882 	 */
883 	popts->ba_intlv_ctl = 0;
884 
885 	/* Memory Organization Parameters */
886 	popts->registered_dimm_en = common_dimm->all_dimms_registered;
887 
	/* Operational Mode Parameters */
889 
890 	/* Pick ECC modes */
891 	popts->ecc_mode = 0;		  /* 0 = disabled, 1 = enabled */
892 #ifdef CONFIG_DDR_ECC
893 	if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
894 		if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
895 			popts->ecc_mode = 1;
896 	} else
897 		popts->ecc_mode = 1;
898 #endif
	/* 1 = use the memory controller to initialize data */
900 	popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;
901 
	/*
	 * Choose DQS config
	 * 0 for DDR1
	 * 1 for DDR2, DDR3
	 */
907 #if defined(CONFIG_SYS_FSL_DDR1)
908 	popts->dqs_config = 0;
909 #elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
910 	popts->dqs_config = 1;
911 #endif
912 
913 	/* Choose self-refresh during sleep. */
914 	popts->self_refresh_in_sleep = 1;
915 
916 	/* Choose dynamic power management mode. */
917 	popts->dynamic_power = 0;
918 
	/*
	 * Check the first DIMM for the primary SDRAM width, assuming all
	 * DIMMs are similar: 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
	 */
924 #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
925 	if (pdimm[0].n_ranks != 0) {
926 		if ((pdimm[0].data_width >= 64) && \
927 			(pdimm[0].data_width <= 72))
928 			popts->data_bus_width = 0;
929 		else if ((pdimm[0].data_width >= 32) && \
930 			(pdimm[0].data_width <= 40))
931 			popts->data_bus_width = 1;
932 		else {
933 			panic("Error: data width %u is invalid!\n",
934 				pdimm[0].data_width);
935 		}
936 	}
937 #else
938 	if (pdimm[0].n_ranks != 0) {
939 		if (pdimm[0].primary_sdram_width == 64)
940 			popts->data_bus_width = 0;
941 		else if (pdimm[0].primary_sdram_width == 32)
942 			popts->data_bus_width = 1;
943 		else if (pdimm[0].primary_sdram_width == 16)
944 			popts->data_bus_width = 2;
945 		else {
946 			panic("Error: primary sdram width %u is invalid!\n",
947 				pdimm[0].primary_sdram_width);
948 		}
949 	}
950 #endif
951 
952 	popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;
953 
954 	/* Choose burst length. */
955 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
956 #if defined(CONFIG_E500MC)
957 	popts->otf_burst_chop_en = 0;	/* on-the-fly burst chop disable */
958 	popts->burst_length = DDR_BL8;	/* Fixed 8-beat burst len */
959 #else
960 	if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
961 		/* 32-bit or 16-bit bus */
962 		popts->otf_burst_chop_en = 0;
963 		popts->burst_length = DDR_BL8;
964 	} else {
965 		popts->otf_burst_chop_en = 1;	/* on-the-fly burst chop */
966 		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
967 	}
968 #endif
969 #else
970 	popts->burst_length = DDR_BL4;	/* has to be 4 for DDR2 */
971 #endif
972 
973 	/* Choose ddr controller address mirror mode */
974 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
975 	for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
976 		if (pdimm[i].n_ranks) {
977 			popts->mirrored_dimm = pdimm[i].mirrored_dimm;
978 			break;
979 		}
980 	}
981 #endif
982 
983 	/* Global Timing Parameters. */
984 	debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));
985 
986 	/* Pick a caslat override. */
987 	popts->cas_latency_override = 0;
988 	popts->cas_latency_override_value = 3;
989 	if (popts->cas_latency_override) {
990 		debug("using caslat override value = %u\n",
991 		       popts->cas_latency_override_value);
992 	}
993 
994 	/* Decide whether to use the computed derated latency */
995 	popts->use_derated_caslat = 0;
996 
997 	/* Choose an additive latency. */
998 	popts->additive_latency_override = 0;
999 	popts->additive_latency_override_value = 3;
1000 	if (popts->additive_latency_override) {
1001 		debug("using additive latency override value = %u\n",
1002 		       popts->additive_latency_override_value);
1003 	}
1004 
1005 	/*
1006 	 * 2T_EN setting
1007 	 *
1008 	 * Factors to consider for 2T_EN:
1009 	 *	- number of DIMMs installed
1010 	 *	- number of components, number of active ranks
1011 	 *	- how much time you want to spend playing around
1012 	 */
1013 	popts->twot_en = 0;
1014 	popts->threet_en = 0;
1015 
1016 	/* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */
1017 	if (popts->registered_dimm_en)
1018 		popts->ap_en = 1; /* 0 = disable,  1 = enable */
1019 	else
1020 		popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */
1021 
1022 	if (hwconfig_sub_f("fsl_ddr", "parity", buf)) {
1023 		if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) {
1024 			if (popts->registered_dimm_en ||
1025 			    (CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4))
1026 				popts->ap_en = 1;
1027 		}
1028 	}
1029 
	/*
	 * BSTTOPRE precharge interval
	 *
	 * Set this to 0 for global auto precharge.
	 * The value 0x100 has been used for DDR1, DDR2 and DDR3 and is not
	 * wrong; almost any value works, and the best choice depends on the
	 * application.  One reasonable approach, used here, is 1/4 of the
	 * refresh interval (refint).
	 */
1039 	popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps)
1040 			 >> 2;
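
	/*
	 * Worked example (illustrative numbers only): with a 7.8 us refresh
	 * interval (7800000 ps) and a 1250 ps memory clock period,
	 * picos_to_mclk() yields 6240 clocks, so bstopre is set to
	 * 6240 >> 2 = 1560 (0x618).
	 */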
1041 
1042 	/*
1043 	 * Window for four activates -- tFAW
1044 	 *
1045 	 * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
1046 	 * FIXME: varies depending upon number of column addresses or data
1047 	 * FIXME: width, was considering looking at pdimm->primary_sdram_width
1048 	 */
1049 #if defined(CONFIG_SYS_FSL_DDR1)
1050 	popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);
1051 
1052 #elif defined(CONFIG_SYS_FSL_DDR2)
1053 	/*
1054 	 * x4/x8;  some datasheets have 35000
1055 	 * x16 wide columns only?  Use 50000?
1056 	 */
1057 	popts->tfaw_window_four_activates_ps = 37500;
1058 
1059 #else
1060 	popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
1061 #endif
1062 	popts->zq_en = 0;
1063 	popts->wrlvl_en = 0;
1064 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	/*
	 * Because DDR3/DDR4 DIMMs use a fly-by topology, write leveling is
	 * enabled so that tDQSS can be met under different loading.
	 */
1070 	popts->wrlvl_en = 1;
1071 	popts->zq_en = 1;
1072 	popts->wrlvl_override = 0;
1073 #endif
1074 
	/*
	 * Check the interleaving configuration from the environment.
	 * Please refer to doc/README.fsl-ddr for details.
	 *
	 * If memory controller interleaving is enabled, the data bus widths
	 * must be programmed identically for all memory controllers.
	 *
	 * Attempt to set all controllers to the same chip-select
	 * interleaving mode.  A best effort is made to interleave the
	 * requested ranks together, so the result should be a subset of the
	 * requested configuration.
	 *
	 * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, 256-byte interleaving
	 * is enforced unconditionally.
	 */
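	/*
	 * For example (illustrative only), an environment setting such as
	 *
	 *	setenv hwconfig "fsl_ddr:ctlr_intlv=cacheline,bank_intlv=cs0_cs1"
	 *
	 * requests cache-line interleaving across controllers and CS0+CS1
	 * bank (rank) interleaving within each controller.
	 */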
1090 #if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
1091 	if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
1092 #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
1093 		;
1094 #else
1095 		goto done;
1096 #endif
1097 	if (pdimm[0].n_ranks == 0) {
1098 		printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
1099 		popts->memctl_interleaving = 0;
1100 		goto done;
1101 	}
1102 	popts->memctl_interleaving = 1;
1103 #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
1104 	popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
1105 	popts->memctl_interleaving = 1;
1106 	debug("256 Byte interleaving\n");
1107 #else
	/*
	 * Test "null" first: if CONFIG_HWCONFIG is not defined,
	 * hwconfig_subarg_cmp_f() returns non-zero.
	 */
1112 	if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
1113 				    "null", buf)) {
1114 		popts->memctl_interleaving = 0;
1115 		debug("memory controller interleaving disabled.\n");
1116 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1117 					"ctlr_intlv",
1118 					"cacheline", buf)) {
1119 		popts->memctl_interleaving_mode =
1120 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1121 			0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
1122 		popts->memctl_interleaving =
1123 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1124 			0 : 1;
1125 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1126 					"ctlr_intlv",
1127 					"page", buf)) {
1128 		popts->memctl_interleaving_mode =
1129 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1130 			0 : FSL_DDR_PAGE_INTERLEAVING;
1131 		popts->memctl_interleaving =
1132 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1133 			0 : 1;
1134 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1135 					"ctlr_intlv",
1136 					"bank", buf)) {
1137 		popts->memctl_interleaving_mode =
1138 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1139 			0 : FSL_DDR_BANK_INTERLEAVING;
1140 		popts->memctl_interleaving =
1141 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1142 			0 : 1;
1143 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1144 					"ctlr_intlv",
1145 					"superbank", buf)) {
1146 		popts->memctl_interleaving_mode =
1147 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1148 			0 : FSL_DDR_SUPERBANK_INTERLEAVING;
1149 		popts->memctl_interleaving =
1150 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1151 			0 : 1;
1152 #if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
1153 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1154 					"ctlr_intlv",
1155 					"3way_1KB", buf)) {
1156 		popts->memctl_interleaving_mode =
1157 			FSL_DDR_3WAY_1KB_INTERLEAVING;
1158 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1159 					"ctlr_intlv",
1160 					"3way_4KB", buf)) {
1161 		popts->memctl_interleaving_mode =
1162 			FSL_DDR_3WAY_4KB_INTERLEAVING;
1163 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1164 					"ctlr_intlv",
1165 					"3way_8KB", buf)) {
1166 		popts->memctl_interleaving_mode =
1167 			FSL_DDR_3WAY_8KB_INTERLEAVING;
1168 #elif (CONFIG_SYS_NUM_DDR_CTLRS == 4)
1169 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1170 					"ctlr_intlv",
1171 					"4way_1KB", buf)) {
1172 		popts->memctl_interleaving_mode =
1173 			FSL_DDR_4WAY_1KB_INTERLEAVING;
1174 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1175 					"ctlr_intlv",
1176 					"4way_4KB", buf)) {
1177 		popts->memctl_interleaving_mode =
1178 			FSL_DDR_4WAY_4KB_INTERLEAVING;
1179 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1180 					"ctlr_intlv",
1181 					"4way_8KB", buf)) {
1182 		popts->memctl_interleaving_mode =
1183 			FSL_DDR_4WAY_8KB_INTERLEAVING;
1184 #endif
1185 	} else {
1186 		popts->memctl_interleaving = 0;
1187 		printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
1188 	}
1189 #endif	/* CONFIG_SYS_FSL_DDR_INTLV_256B */
1190 done:
1191 #endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */
1192 	if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
1193 		(CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
		/*
		 * Test "null" first: if CONFIG_HWCONFIG is not defined,
		 * hwconfig_subarg_cmp_f() returns non-zero.
		 */
1196 		if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1197 					    "null", buf))
1198 			debug("bank interleaving disabled.\n");
1199 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1200 						 "cs0_cs1", buf))
1201 			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
1202 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1203 						 "cs2_cs3", buf))
1204 			popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
1205 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1206 						 "cs0_cs1_and_cs2_cs3", buf))
1207 			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
1208 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1209 						 "cs0_cs1_cs2_cs3", buf))
1210 			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
1211 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1212 						"auto", buf))
1213 			popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
1214 		else
1215 			printf("hwconfig has unrecognized parameter for bank_intlv.\n");
1216 		switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
1217 		case FSL_DDR_CS0_CS1_CS2_CS3:
1218 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1219 			if (pdimm[0].n_ranks < 4) {
1220 				popts->ba_intlv_ctl = 0;
1221 				printf("Not enough bank(chip-select) for "
1222 					"CS0+CS1+CS2+CS3 on controller %d, "
1223 					"interleaving disabled!\n", ctrl_num);
1224 			}
1225 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1226 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
1227 			if (pdimm[0].n_ranks == 4)
1228 				break;
1229 #endif
1230 			if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
1231 				popts->ba_intlv_ctl = 0;
1232 				printf("Not enough bank(chip-select) for "
1233 					"CS0+CS1+CS2+CS3 on controller %d, "
1234 					"interleaving disabled!\n", ctrl_num);
1235 			}
1236 			if (pdimm[0].capacity != pdimm[1].capacity) {
1237 				popts->ba_intlv_ctl = 0;
1238 				printf("Not identical DIMM size for "
1239 					"CS0+CS1+CS2+CS3 on controller %d, "
1240 					"interleaving disabled!\n", ctrl_num);
1241 			}
1242 #endif
1243 			break;
1244 		case FSL_DDR_CS0_CS1:
1245 			if (pdimm[0].n_ranks < 2) {
1246 				popts->ba_intlv_ctl = 0;
1247 				printf("Not enough bank(chip-select) for "
1248 					"CS0+CS1 on controller %d, "
1249 					"interleaving disabled!\n", ctrl_num);
1250 			}
1251 			break;
1252 		case FSL_DDR_CS2_CS3:
1253 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1254 			if (pdimm[0].n_ranks < 4) {
1255 				popts->ba_intlv_ctl = 0;
1256 				printf("Not enough bank(chip-select) for CS2+CS3 "
1257 					"on controller %d, interleaving disabled!\n", ctrl_num);
1258 			}
1259 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1260 			if (pdimm[1].n_ranks < 2) {
1261 				popts->ba_intlv_ctl = 0;
1262 				printf("Not enough bank(chip-select) for CS2+CS3 "
1263 					"on controller %d, interleaving disabled!\n", ctrl_num);
1264 			}
1265 #endif
1266 			break;
1267 		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
1268 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1269 			if (pdimm[0].n_ranks < 4) {
1270 				popts->ba_intlv_ctl = 0;
1271 				printf("Not enough bank(CS) for CS0+CS1 and "
1272 					"CS2+CS3 on controller %d, "
1273 					"interleaving disabled!\n", ctrl_num);
1274 			}
1275 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1276 			if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
1277 				popts->ba_intlv_ctl = 0;
1278 				printf("Not enough bank(CS) for CS0+CS1 and "
1279 					"CS2+CS3 on controller %d, "
1280 					"interleaving disabled!\n", ctrl_num);
1281 			}
1282 #endif
1283 			break;
1284 		default:
1285 			popts->ba_intlv_ctl = 0;
1286 			break;
1287 		}
1288 	}
1289 
1290 	if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
1291 		if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
1292 			popts->addr_hash = 0;
1293 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
1294 					       "true", buf))
1295 			popts->addr_hash = 1;
1296 	}
1297 
1298 	if (pdimm[0].n_ranks == 4)
1299 		popts->quad_rank_present = 1;
1300 
1301 	popts->package_3ds = pdimm->package_3ds;
1302 
1303 #if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
1304 	ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
1305 	if (popts->registered_dimm_en) {
1306 		popts->rcw_override = 1;
1307 		popts->rcw_1 = 0x000a5a00;
1308 		if (ddr_freq <= 800)
1309 			popts->rcw_2 = 0x00000000;
1310 		else if (ddr_freq <= 1066)
1311 			popts->rcw_2 = 0x00100000;
1312 		else if (ddr_freq <= 1333)
1313 			popts->rcw_2 = 0x00200000;
1314 		else
1315 			popts->rcw_2 = 0x00300000;
1316 	}
1317 #endif
1318 
1319 	fsl_ddr_board_options(popts, pdimm, ctrl_num);
1320 
1321 	return 0;
1322 }
1323 
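/*
 * Verify that the controllers selected for memory controller interleaving
 * use compatible DIMMs and the same interleaving mode; if not, interleaving
 * is disabled again on all controllers.
 */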
void check_interleaving_options(fsl_ddr_info_t *pinfo)
1325 {
1326 	int i, j, k, check_n_ranks, intlv_invalid = 0;
1327 	unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
1328 	unsigned long long check_rank_density;
1329 	struct dimm_params_s *dimm;
1330 	int first_ctrl = pinfo->first_ctrl;
1331 	int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;
1332 
	/*
	 * Check whether all controllers are configured for memory controller
	 * interleaving.  Identical DIMMs are recommended; at a minimum the
	 * size and the row and column address widths are checked here.
	 */
1338 	j = 0;
1339 	check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks;
1340 	check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density;
1341 	check_n_row_addr =  pinfo->dimm_params[first_ctrl][0].n_row_addr;
1342 	check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr;
1343 	check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode;
1344 	for (i = first_ctrl; i <= last_ctrl; i++) {
1345 		dimm = &pinfo->dimm_params[i][0];
1346 		if (!pinfo->memctl_opts[i].memctl_interleaving) {
1347 			continue;
1348 		} else if (((check_rank_density != dimm->rank_density) ||
1349 		     (check_n_ranks != dimm->n_ranks) ||
1350 		     (check_n_row_addr != dimm->n_row_addr) ||
1351 		     (check_n_col_addr != dimm->n_col_addr) ||
1352 		     (check_intlv !=
1353 			pinfo->memctl_opts[i].memctl_interleaving_mode))){
1354 			intlv_invalid = 1;
1355 			break;
1356 		} else {
1357 			j++;
1358 		}
1359 
1360 	}
1361 	if (intlv_invalid) {
1362 		for (i = first_ctrl; i <= last_ctrl; i++)
1363 			pinfo->memctl_opts[i].memctl_interleaving = 0;
1364 		printf("Not all DIMMs are identical. "
1365 			"Memory controller interleaving disabled.\n");
1366 	} else {
1367 		switch (check_intlv) {
1368 		case FSL_DDR_256B_INTERLEAVING:
1369 		case FSL_DDR_CACHE_LINE_INTERLEAVING:
1370 		case FSL_DDR_PAGE_INTERLEAVING:
1371 		case FSL_DDR_BANK_INTERLEAVING:
1372 		case FSL_DDR_SUPERBANK_INTERLEAVING:
#if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
			k = 2;
#else
			k = CONFIG_SYS_NUM_DDR_CTLRS;
#endif
1378 			break;
1379 		case FSL_DDR_3WAY_1KB_INTERLEAVING:
1380 		case FSL_DDR_3WAY_4KB_INTERLEAVING:
1381 		case FSL_DDR_3WAY_8KB_INTERLEAVING:
1382 		case FSL_DDR_4WAY_1KB_INTERLEAVING:
1383 		case FSL_DDR_4WAY_4KB_INTERLEAVING:
1384 		case FSL_DDR_4WAY_8KB_INTERLEAVING:
1385 		default:
1386 			k = CONFIG_SYS_NUM_DDR_CTLRS;
1387 			break;
1388 		}
1389 		debug("%d of %d controllers are interleaving.\n", j, k);
1390 		if (j && (j != k)) {
1391 			for (i = first_ctrl; i <= last_ctrl; i++)
1392 				pinfo->memctl_opts[i].memctl_interleaving = 0;
1393 			if ((last_ctrl - first_ctrl) > 1)
1394 				puts("Not all controllers have compatible interleaving mode. All disabled.\n");
1395 		}
1396 	}
1397 	debug("Checking interleaving options completed\n");
1398 }
1399 
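/*
 * Return 1 if SPD-based configuration should be used, 0 for the board's
 * fixed parameters.  With CONFIG_DDR_SPD enabled this can be overridden
 * from the environment, e.g. (illustrative only):
 *
 *	setenv hwconfig "fsl_ddr:sdram=fixed"
 */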
int fsl_use_spd(void)
1401 {
1402 	int use_spd = 0;
1403 
1404 #ifdef CONFIG_DDR_SPD
1405 	char buf[HWCONFIG_BUFFER_SIZE];
1406 
	/*
	 * Extract hwconfig from the environment, since the environment has
	 * not been fully set up yet but we need it for the DDR config
	 * parameters.
	 */
1411 	if (env_get_f("hwconfig", buf, sizeof(buf)) < 0)
1412 		buf[0] = '\0';
1413 
1414 	/* if hwconfig is not enabled, or "sdram" is not defined, use spd */
1415 	if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
1416 		if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
1417 			use_spd = 1;
1418 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
1419 					       "fixed", buf))
1420 			use_spd = 0;
1421 		else
1422 			use_spd = 1;
1423 	} else
1424 		use_spd = 1;
1425 #endif
1426 
1427 	return use_spd;
1428 }
1429