// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM-specific support for Broadcom STB S2/S3/S5 power management
 *
 * S2: clock gate CPUs and as many peripherals as possible
 * S3: power off all of the chip except the Always ON (AON) island; keep DDR in
 *     self-refresh
 * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
 *     treat this mode like a soft power-off, with wakeup allowed from AON
 *
 * Copyright © 2014-2017 Broadcom
 */

#define pr_fmt(fmt) "brcmstb-pm: " fmt

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/proc_fs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/soc/brcmstb/brcmstb.h>

#include <asm/fncpy.h>
#include <asm/setup.h>
#include <asm/suspend.h>

#include "pm.h"
#include "aon_defs.h"
#define SHIMPHY_DDR_PAD_CNTRL		0x8c

/* Method #0 */
#define SHIMPHY_PAD_PLL_SEQUENCE	BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3		BIT(9)

/* Method #1 */
#define PWRDWN_SEQ_NO_SEQUENCING	0
#define PWRDWN_SEQ_HOLD_CHANNEL		1
#define PWRDWN_SEQ_RESET_PLL		2
#define PWRDWN_SEQ_POWERDOWN_PLL	3

#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK	0x00f00000
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT	20

#define DDR_FORCE_CKE_RST_N		BIT(3)
#define DDR_PHY_RST_N			BIT(2)
#define DDR_PHY_CKE			BIT(1)

#define DDR_PHY_NO_CHANNEL		0xffffffff

#define MAX_NUM_MEMC			3

struct brcmstb_memc {
	void __iomem *ddr_phy_base;
	void __iomem *ddr_shimphy_base;
	void __iomem *ddr_ctrl;
};

struct brcmstb_pm_control {
	void __iomem *aon_ctrl_base;
	void __iomem *aon_sram;
	struct brcmstb_memc memcs[MAX_NUM_MEMC];

	void __iomem *boot_sram;
	size_t boot_sram_len;

	bool support_warm_boot;
	size_t pll_status_offset;
	int num_memc;

	struct brcmstb_s3_params *s3_params;
	dma_addr_t s3_params_pa;
	int s3entry_method;
	u32 warm_boot_offset;
	u32 phy_a_standby_ctrl_offs;
	u32 phy_b_standby_ctrl_offs;
	bool needs_ddr_pad;
	struct platform_device *pdev;
};

enum bsp_initiate_command {
	BSP_CLOCK_STOP		= 0x00,
	BSP_GEN_RANDOM_KEY	= 0x4A,
	BSP_RESTORE_RANDOM_KEY	= 0x55,
	BSP_GEN_FIXED_KEY	= 0x63,
};

#define PM_INITIATE		0x01
#define PM_INITIATE_SUCCESS	0x00
#define PM_INITIATE_FAIL	0xfe

static struct brcmstb_pm_control ctrl;

noinline int brcmstb_pm_s3_finish(void);

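/*
 * brcmstb_pm_do_s2() must run from on-chip boot SRAM while DDR is brought
 * down and back up; brcmstb_pm_s2() copies it there with fncpy() and then
 * calls it through this pointer.
 */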
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
		void __iomem *ddr_phy_pll_status);

static int brcmstb_init_sram(struct device_node *dn)
{
	void __iomem *sram;
	struct resource res;
	int ret;

	ret = of_address_to_resource(dn, 0, &res);
	if (ret)
		return ret;

	/* Uncached, executable remapping of SRAM */
	sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
	if (!sram)
		return -ENOMEM;

	ctrl.boot_sram = sram;
	ctrl.boot_sram_len = resource_size(&res);

	return 0;
}

static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ /* sentinel */ }
};

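/*
 * Issue a command to the BSP via the AON mailbox register: clear
 * AON_CTRL_PM_INITIATE, write the command in bits [7:1] with PM_INITIATE
 * (bit 0) set, then poll for the BSP to clear PM_INITIATE. The low byte of
 * the register then holds the completion status.
 */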
static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	int ret;
	int timeo = 1000 * 1000; /* 1 second */

	writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
	(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);

	/* Go! */
	writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);

	/*
	 * If firmware doesn't support the 'ack', then just assume it's done
	 * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
	 */
	if (of_machine_is_compatible("brcm,bcm74371a0")) {
		(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
		mdelay(10);
		return 0;
	}

	for (;;) {
		ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
		if (!(ret & PM_INITIATE))
			break;
		if (timeo <= 0) {
			pr_err("error: timeout waiting for BSP (%x)\n", ret);
			break;
		}
		timeo -= 50;
		udelay(50);
	}

	return (ret & 0xff) != PM_INITIATE_SUCCESS;
}

static int brcmstb_pm_handshake(void)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	u32 tmp;
	int ret;

	/* BSP power handshake, v1 */
	tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
	tmp &= ~1UL;
	writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
	(void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);

	ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
	if (ret)
		pr_err("BSP handshake failed\n");

	/*
	 * HACK: BSP may have internal race on the CLOCK_STOP command.
	 * Avoid touching the BSP for a few milliseconds.
	 */
	mdelay(3);

	return ret;
}

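/*
 * Read-modify-write SHIMPHY_DDR_PAD_CNTRL on every MEMC: the bits selected
 * by @mask are preserved and @value is OR'd in on top. This is a no-op on
 * chips whose DDR sequencer does not need pad control.
 */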
static inline void shimphy_set(u32 value, u32 mask)
{
	int i;

	if (!ctrl.needs_ddr_pad)
		return;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
			SHIMPHY_DDR_PAD_CNTRL);
		tmp = value | (tmp & mask);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
			SHIMPHY_DDR_PAD_CNTRL);
	}
	wmb(); /* Complete sequence in order. */
}

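/*
 * Set or clear the WARM_BOOT bit (bit 0) in each MEMC's warm-boot register,
 * telling the memory controller whether DDR contents will be preserved
 * (self-refresh) across the coming power transition.
 */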
static inline void ddr_ctrl_set(bool warmboot)
{
	int i;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
				ctrl.warm_boot_offset);
		if (warmboot)
			tmp |= 1;
		else
			tmp &= ~1; /* Cold boot */
		writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
				ctrl.warm_boot_offset);
	}
	/* Complete sequence in order */
	wmb();
}

static inline void s3entry_method0(void)
{
	shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
		    0xffffffff);
}

static inline void s3entry_method1(void)
{
	/*
	 * S3 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(true);
}

static inline void s5entry_method1(void)
{
	int i;

	/*
	 * S5 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
	 * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
	 *	   DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(false);

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		/* Step 3: Channel A (RST_N = CKE = 0) */
		tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
				  ctrl.phy_a_standby_ctrl_offs);
		tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
			     ctrl.phy_a_standby_ctrl_offs);

		/* Step 3: Channel B? */
		if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
			tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
					  ctrl.phy_b_standby_ctrl_offs);
			tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
			writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
				     ctrl.phy_b_standby_ctrl_offs);
		}
	}
	/* Must complete */
	wmb();
}


/*
 * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
 * into a low-power mode
 */
static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
{
	void __iomem *base = ctrl.aon_ctrl_base;

	if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
		s5entry_method1();

	/* pm_start_pwrdn transition 0->1 */
	writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);

	if (!onewrite) {
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);

		writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);
	}
	wfi();
}

/* Support S5 cold boot out of "poweroff" */
static void brcmstb_pm_poweroff(void)
{
	brcmstb_pm_handshake();

	/* Clear magic S3 warm-boot value */
	writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	(void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	/* Skip wait-for-interrupt signal; just use a countdown */
	writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
	(void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);

	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
			     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			     ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
		brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
		return; /* We should never actually get here */
	}

	brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
}

static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
{
	unsigned int size = ALIGN(len, FNCPY_ALIGN);

	if (ctrl.boot_sram_len < size) {
		pr_err("standby code will not fit in SRAM\n");
		return NULL;
	}

	return fncpy(ctrl.boot_sram, fn, size);
}

/*
 * S2 suspend/resume picks up where we left off, so we must execute carefully
 * from SRAM, in order to allow DDR to come back up safely before we continue.
 */
static int brcmstb_pm_s2(void)
{
	/* A previous S3 can set a value hazardous to S2, so make sure. */
	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
			    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
	}

	brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
			brcmstb_pm_do_s2_sz);
	if (!brcmstb_pm_do_s2_sram)
		return -EINVAL;

	return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
			ctrl.memcs[0].ddr_phy_base +
			ctrl.pll_status_offset);
}

/*
 * This function is called on a new stack, so don't allow inlining (which will
 * generate stack references on the old stack). It cannot be made static
 * because it is referenced by name from the inline assembly in
 * brcmstb_pm_do_s3().
 */
noinline int brcmstb_pm_s3_finish(void)
{
	struct brcmstb_s3_params *params = ctrl.s3_params;
	dma_addr_t params_pa = ctrl.s3_params_pa;
	phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
	enum bsp_initiate_command cmd;
	u32 flags;

	/*
	 * Clear parameter structure, but not DTU area, which has already been
	 * filled in. We know DTU is at the end, so we can just subtract its
	 * size.
	 */
	memset(params, 0, sizeof(*params) - sizeof(params->dtu));

	flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	flags &= S3_BOOTLOADER_RESERVED;
	flags |= S3_FLAG_NO_MEM_VERIFY;
	flags |= S3_FLAG_LOAD_RANDKEY;

	/* Load random / fixed key */
	if (flags & S3_FLAG_LOAD_RANDKEY)
		cmd = BSP_GEN_RANDOM_KEY;
	else
		cmd = BSP_GEN_FIXED_KEY;
	if (do_bsp_initiate_command(cmd)) {
		pr_info("key loading failed\n");
		return -EIO;
	}

	params->magic = BRCMSTB_S3_MAGIC;
	params->reentry = reentry;

	/* No more writes to DRAM */
	flush_cache_all();

	flags |= BRCMSTB_S3_MAGIC_SHORT;

	writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	writel_relaxed(lower_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_LOW);
	writel_relaxed(upper_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_HIGH);

	switch (ctrl.s3entry_method) {
	case 0:
		s3entry_method0();
		brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
		break;
	case 1:
		s3entry_method1();
		brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
		break;
	default:
		return -EINVAL;
	}

	/* Must have been interrupted from wfi()? */
	return -EINTR;
}

static int brcmstb_pm_do_s3(unsigned long sp)
{
	unsigned long save_sp;
	int ret;

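	/*
	 * Switch to the new stack (the top of boot SRAM, passed in via
	 * cpu_suspend()), call brcmstb_pm_s3_finish() on it, then restore
	 * the original stack pointer. Doing the switch in assembly keeps
	 * the compiler from emitting stack accesses to DDR while it is
	 * being put into self-refresh.
	 */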
	asm volatile (
		"mov	%[save], sp\n"
		"mov	sp, %[new]\n"
		"bl	brcmstb_pm_s3_finish\n"
		"mov	%[ret], r0\n"
		"mov	%[new], sp\n"
		"mov	sp, %[save]\n"
		: [save] "=&r" (save_sp), [ret] "=&r" (ret)
		: [new] "r" (sp)
	);

	return ret;
}

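/*
 * Suspend to S3 with the stack relocated to the top of boot SRAM, since the
 * DDR backing the normal stack is about to enter self-refresh.
 */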
static int brcmstb_pm_s3(void)
{
	void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;

	return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
}

static int brcmstb_pm_standby(bool deep_standby)
{
	int ret;

	if (brcmstb_pm_handshake())
		return -EIO;

	if (deep_standby)
		ret = brcmstb_pm_s3();
	else
		ret = brcmstb_pm_s2();
	if (ret)
		pr_err("%s: standby failed\n", __func__);

	return ret;
}

static int brcmstb_pm_enter(suspend_state_t state)
{
	int ret = -EINVAL;

	switch (state) {
	case PM_SUSPEND_STANDBY:
		ret = brcmstb_pm_standby(false);
		break;
	case PM_SUSPEND_MEM:
		ret = brcmstb_pm_standby(true);
		break;
	}

	return ret;
}

static int brcmstb_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return true;
	case PM_SUSPEND_MEM:
		return ctrl.support_warm_boot;
	default:
		return false;
	}
}

static const struct platform_suspend_ops brcmstb_pm_ops = {
	.enter		= brcmstb_pm_enter,
	.valid		= brcmstb_pm_valid,
};

static const struct of_device_id aon_ctrl_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-aon-ctrl" },
	{}
};

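/*
 * Per-DDR-PHY-version register offsets and capabilities; an instance is
 * selected by compatible string in ddr_phy_dt_ids[] below.
 */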
struct ddr_phy_ofdata {
	bool supports_warm_boot;
	size_t pll_status_offset;
	int s3entry_method;
	u32 warm_boot_offset;
	u32 phy_a_standby_ctrl_offs;
	u32 phy_b_standby_ctrl_offs;
};

static struct ddr_phy_ofdata ddr_phy_71_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x0c,
	.s3entry_method = 1,
	.warm_boot_offset = 0x2c,
	.phy_a_standby_ctrl_offs = 0x198,
	.phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};

static struct ddr_phy_ofdata ddr_phy_72_0 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x10,
	.s3entry_method = 1,
	.warm_boot_offset = 0x40,
	.phy_a_standby_ctrl_offs = 0x2a4,
	.phy_b_standby_ctrl_offs = 0x8a4
};

static struct ddr_phy_ofdata ddr_phy_225_1 = {
	.supports_warm_boot = false,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

static struct ddr_phy_ofdata ddr_phy_240_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

static const struct of_device_id ddr_phy_dt_ids[] = {
	{
		.compatible = "brcm,brcmstb-ddr-phy-v71.1",
		.data = &ddr_phy_71_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v72.0",
		.data = &ddr_phy_72_0,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v225.1",
		.data = &ddr_phy_225_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v240.1",
		.data = &ddr_phy_240_1,
	},
	{
		/* Same as v240.1, for the registers we care about */
		.compatible = "brcm,brcmstb-ddr-phy-v240.2",
		.data = &ddr_phy_240_1,
	},
	{}
};

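/*
 * Per-memory-controller (DDR sequencer) quirks: whether the SHIMPHY pads
 * need sequencing, and an optional override for the warm-boot register
 * offset (applied by the probe code when non-zero).
 */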
struct ddr_seq_ofdata {
	bool needs_ddr_pad;
	u32 warm_boot_offset;
};

static const struct ddr_seq_ofdata ddr_seq_b22 = {
	.needs_ddr_pad = false,
	.warm_boot_offset = 0x2c,
};

static const struct ddr_seq_ofdata ddr_seq = {
	.needs_ddr_pad = true,
};

static const struct of_device_id ddr_shimphy_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
	{}
};

static const struct of_device_id brcmstb_memc_of_match[] = {
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
		.data = &ddr_seq,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr",
		.data = &ddr_seq,
	},
	{},
};

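/*
 * Map register resource @index of the first DT node matching @matches,
 * optionally returning the node's match data through @ofdata. Returns an
 * ERR_PTR() on failure, so callers check with IS_ERR().
 */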
static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
					   int index, const void **ofdata)
{
	struct device_node *dn;
	const struct of_device_id *match;

	dn = of_find_matching_node_and_match(NULL, matches, &match);
	if (!dn)
		return ERR_PTR(-EINVAL);

	if (ofdata)
		*ofdata = match->data;

	return of_io_request_and_map(dn, index, dn->full_name);
}

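/*
 * On panic, stamp BRCMSTB_PANIC_MAGIC into always-on (AON) SRAM so the
 * event can be detected after the system comes back up.
 */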
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
		unsigned long action, void *data)
{
	writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

	return NOTIFY_DONE;
}

static struct notifier_block brcmstb_pm_panic_nb = {
	.notifier_call = brcmstb_pm_panic_notify,
};

static int brcmstb_pm_probe(struct platform_device *pdev)
{
	const struct ddr_phy_ofdata *ddr_phy_data;
	const struct ddr_seq_ofdata *ddr_seq_data;
	const struct of_device_id *of_id = NULL;
	struct device_node *dn;
	void __iomem *base;
	int ret, i;

	/* AON ctrl registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
	if (IS_ERR(base)) {
		pr_err("error mapping AON_CTRL\n");
		return PTR_ERR(base);
	}
	ctrl.aon_ctrl_base = base;

	/* AON SRAM registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
	if (IS_ERR(base)) {
		/* Assume standard offset */
		ctrl.aon_sram = ctrl.aon_ctrl_base +
				     AON_CTRL_SYSTEM_DATA_RAM_OFS;
	} else {
		ctrl.aon_sram = base;
	}

	writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);

	/* DDR PHY registers */
	base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
				     (const void **)&ddr_phy_data);
	if (IS_ERR(base)) {
		pr_err("error mapping DDR PHY\n");
		return PTR_ERR(base);
	}
	ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
	ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
	/* Only need DDR PHY 0 for now? */
	ctrl.memcs[0].ddr_phy_base = base;
	ctrl.s3entry_method = ddr_phy_data->s3entry_method;
	ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
	ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
	/*
	 * Slightly gross to use the PHY version to pick a MEMC offset, but
	 * that is the only versioned thing we can test for so far.
	 */
	ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;

	/* DDR SHIM-PHY registers */
	for_each_matching_node(dn, ddr_shimphy_dt_ids) {
		i = ctrl.num_memc;
		if (i >= MAX_NUM_MEMC) {
			pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
			break;
		}

		base = of_io_request_and_map(dn, 0, dn->full_name);
		if (IS_ERR(base)) {
			if (!ctrl.support_warm_boot)
				break;

			pr_err("error mapping DDR SHIMPHY %d\n", i);
			return PTR_ERR(base);
		}
		ctrl.memcs[i].ddr_shimphy_base = base;
		ctrl.num_memc++;
	}

	/* Sequencer DRAM Param and Control Registers */
	i = 0;
	for_each_matching_node(dn, brcmstb_memc_of_match) {
		base = of_iomap(dn, 0);
		if (!base) {
			pr_err("error mapping DDR Sequencer %d\n", i);
			return -ENOMEM;
		}

		of_id = of_match_node(brcmstb_memc_of_match, dn);
		if (!of_id) {
			iounmap(base);
			return -EINVAL;
		}

		ddr_seq_data = of_id->data;
		ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
		/* Adjust warm boot offset based on the DDR sequencer */
		if (ddr_seq_data->warm_boot_offset)
			ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;

		ctrl.memcs[i].ddr_ctrl = base;
		i++;
	}

	pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
		ctrl.support_warm_boot, ctrl.s3entry_method,
		ctrl.warm_boot_offset);

	dn = of_find_matching_node(NULL, sram_dt_ids);
	if (!dn) {
		pr_err("SRAM not found\n");
		return -EINVAL;
	}

	ret = brcmstb_init_sram(dn);
	if (ret) {
		pr_err("error setting up SRAM for PM\n");
		return ret;
	}

	ctrl.pdev = pdev;

	ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
	if (!ctrl.s3_params)
		return -ENOMEM;
	ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
					   sizeof(*ctrl.s3_params),
					   DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
		pr_err("error mapping DMA memory\n");
		ret = -ENOMEM;
		goto out;
	}

	atomic_notifier_chain_register(&panic_notifier_list,
				       &brcmstb_pm_panic_nb);

	pm_power_off = brcmstb_pm_poweroff;
	suspend_set_ops(&brcmstb_pm_ops);

	return 0;

out:
	kfree(ctrl.s3_params);

	pr_warn("PM: initialization failed with code %d\n", ret);

	return ret;
}

static struct platform_driver brcmstb_pm_driver = {
	.driver = {
		.name	= "brcmstb-pm",
		.of_match_table = aon_ctrl_dt_ids,
	},
};

static int __init brcmstb_pm_init(void)
{
	return platform_driver_probe(&brcmstb_pm_driver,
				     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);