1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (c) 2014 Broadcom Corporation
4  */
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/list.h>
8 #include <linux/ssb/ssb_regs.h>
9 #include <linux/bcma/bcma.h>
10 #include <linux/bcma/bcma_regs.h>
11 
12 #include <defs.h>
13 #include <soc.h>
14 #include <brcm_hw_ids.h>
15 #include <brcmu_utils.h>
16 #include <chipcommon.h>
17 #include "debug.h"
18 #include "chip.h"
19 
20 /* SOC Interconnect types (aka chip types) */
21 #define SOCI_SB		0
22 #define SOCI_AI		1
23 
24 /* PL-368 DMP definitions */
25 #define DMP_DESC_TYPE_MSK	0x0000000F
26 #define  DMP_DESC_EMPTY		0x00000000
27 #define  DMP_DESC_VALID		0x00000001
28 #define  DMP_DESC_COMPONENT	0x00000001
29 #define  DMP_DESC_MASTER_PORT	0x00000003
30 #define  DMP_DESC_ADDRESS	0x00000005
31 #define  DMP_DESC_ADDRSIZE_GT32	0x00000008
32 #define  DMP_DESC_EOT		0x0000000F
33 
34 #define DMP_COMP_DESIGNER	0xFFF00000
35 #define DMP_COMP_DESIGNER_S	20
36 #define DMP_COMP_PARTNUM	0x000FFF00
37 #define DMP_COMP_PARTNUM_S	8
38 #define DMP_COMP_CLASS		0x000000F0
39 #define DMP_COMP_CLASS_S	4
40 #define DMP_COMP_REVISION	0xFF000000
41 #define DMP_COMP_REVISION_S	24
42 #define DMP_COMP_NUM_SWRAP	0x00F80000
43 #define DMP_COMP_NUM_SWRAP_S	19
44 #define DMP_COMP_NUM_MWRAP	0x0007C000
45 #define DMP_COMP_NUM_MWRAP_S	14
46 #define DMP_COMP_NUM_SPORT	0x00003E00
47 #define DMP_COMP_NUM_SPORT_S	9
48 #define DMP_COMP_NUM_MPORT	0x000001F0
49 #define DMP_COMP_NUM_MPORT_S	4
50 
51 #define DMP_MASTER_PORT_UID	0x0000FF00
52 #define DMP_MASTER_PORT_UID_S	8
53 #define DMP_MASTER_PORT_NUM	0x000000F0
54 #define DMP_MASTER_PORT_NUM_S	4
55 
56 #define DMP_SLAVE_ADDR_BASE	0xFFFFF000
57 #define DMP_SLAVE_ADDR_BASE_S	12
58 #define DMP_SLAVE_PORT_NUM	0x00000F00
59 #define DMP_SLAVE_PORT_NUM_S	8
60 #define DMP_SLAVE_TYPE		0x000000C0
61 #define DMP_SLAVE_TYPE_S	6
62 #define  DMP_SLAVE_TYPE_SLAVE	0
63 #define  DMP_SLAVE_TYPE_BRIDGE	1
64 #define  DMP_SLAVE_TYPE_SWRAP	2
65 #define  DMP_SLAVE_TYPE_MWRAP	3
66 #define DMP_SLAVE_SIZE_TYPE	0x00000030
67 #define DMP_SLAVE_SIZE_TYPE_S	4
68 #define  DMP_SLAVE_SIZE_4K	0
69 #define  DMP_SLAVE_SIZE_8K	1
70 #define  DMP_SLAVE_SIZE_16K	2
71 #define  DMP_SLAVE_SIZE_DESC	3
72 
73 /* EROM CompIdentB */
74 #define CIB_REV_MASK		0xff000000
75 #define CIB_REV_SHIFT		24
76 
77 /* ARM CR4 core specific control flag bits */
78 #define ARMCR4_BCMA_IOCTL_CPUHALT	0x0020
79 
80 /* D11 core specific control flag bits */
81 #define D11_BCMA_IOCTL_PHYCLOCKEN	0x0004
82 #define D11_BCMA_IOCTL_PHYRESET		0x0008
83 
84 /* chip core base & ramsize */
85 /* bcm4329 */
86 /* SDIO device core, ID 0x829 */
87 #define BCM4329_CORE_BUS_BASE		0x18011000
88 /* internal memory core, ID 0x80e */
89 #define BCM4329_CORE_SOCRAM_BASE	0x18003000
90 /* ARM Cortex M3 core, ID 0x82a */
91 #define BCM4329_CORE_ARM_BASE		0x18002000
92 
93 /* Max possibly supported memory size (limited by IO mapped memory) */
94 #define BRCMF_CHIP_MAX_MEMSIZE		(4 * 1024 * 1024)
95 
96 #define CORE_SB(base, field) \
97 		(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
98 #define	SBCOREREV(sbidh) \
99 	((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
100 	  ((sbidh) & SSB_IDHIGH_RCLO))
101 
102 struct sbconfig {
103 	u32 PAD[2];
104 	u32 sbipsflag;	/* initiator port ocp slave flag */
105 	u32 PAD[3];
106 	u32 sbtpsflag;	/* target port ocp slave flag */
107 	u32 PAD[11];
108 	u32 sbtmerrloga;	/* (sonics >= 2.3) */
109 	u32 PAD;
110 	u32 sbtmerrlog;	/* (sonics >= 2.3) */
111 	u32 PAD[3];
112 	u32 sbadmatch3;	/* address match3 */
113 	u32 PAD;
114 	u32 sbadmatch2;	/* address match2 */
115 	u32 PAD;
116 	u32 sbadmatch1;	/* address match1 */
117 	u32 PAD[7];
118 	u32 sbimstate;	/* initiator agent state */
119 	u32 sbintvec;	/* interrupt mask */
120 	u32 sbtmstatelow;	/* target state */
121 	u32 sbtmstatehigh;	/* target state */
122 	u32 sbbwa0;		/* bandwidth allocation table0 */
123 	u32 PAD;
124 	u32 sbimconfiglow;	/* initiator configuration */
125 	u32 sbimconfighigh;	/* initiator configuration */
126 	u32 sbadmatch0;	/* address match0 */
127 	u32 PAD;
128 	u32 sbtmconfiglow;	/* target configuration */
129 	u32 sbtmconfighigh;	/* target configuration */
130 	u32 sbbconfig;	/* broadcast configuration */
131 	u32 PAD;
132 	u32 sbbstate;	/* broadcast state */
133 	u32 PAD[3];
134 	u32 sbactcnfg;	/* activate configuration */
135 	u32 PAD[3];
136 	u32 sbflagst;	/* current sbflags */
137 	u32 PAD[3];
138 	u32 sbidlow;		/* identification */
139 	u32 sbidhigh;	/* identification */
140 };
141 
/* bankidx and bankinfo register defines for corerev >= 8 */
143 #define SOCRAM_BANKINFO_RETNTRAM_MASK	0x00010000
144 #define SOCRAM_BANKINFO_SZMASK		0x0000007f
145 #define SOCRAM_BANKIDX_ROM_MASK		0x00000100
146 
147 #define SOCRAM_BANKIDX_MEMTYPE_SHIFT	8
148 /* socram bankinfo memtype */
149 #define SOCRAM_MEMTYPE_RAM		0
#define SOCRAM_MEMTYPE_ROM		1
151 #define SOCRAM_MEMTYPE_DEVRAM		2
152 
153 #define SOCRAM_BANKINFO_SZBASE		8192
154 #define SRCI_LSS_MASK		0x00f00000
155 #define SRCI_LSS_SHIFT		20
156 #define	SRCI_SRNB_MASK		0xf0
157 #define	SRCI_SRNB_MASK_EXT	0x100
158 #define	SRCI_SRNB_SHIFT		4
159 #define	SRCI_SRBSZ_MASK		0xf
160 #define	SRCI_SRBSZ_SHIFT	0
161 #define SR_BSZ_BASE		14
162 
163 struct sbsocramregs {
164 	u32 coreinfo;
165 	u32 bwalloc;
166 	u32 extracoreinfo;
167 	u32 biststat;
168 	u32 bankidx;
169 	u32 standbyctrl;
170 
171 	u32 errlogstatus;	/* rev 6 */
172 	u32 errlogaddr;	/* rev 6 */
173 	/* used for patching rev 3 & 5 */
174 	u32 cambankidx;
175 	u32 cambankstandbyctrl;
176 	u32 cambankpatchctrl;
177 	u32 cambankpatchtblbaseaddr;
178 	u32 cambankcmdreg;
179 	u32 cambankdatareg;
180 	u32 cambankmaskreg;
181 	u32 PAD[1];
	u32 bankinfo;	/* corerev 8 */
183 	u32 bankpda;
184 	u32 PAD[14];
185 	u32 extmemconfig;
186 	u32 extmemparitycsr;
187 	u32 extmemparityerrdata;
188 	u32 extmemparityerrcnt;
189 	u32 extmemwrctrlandsize;
190 	u32 PAD[84];
191 	u32 workaround;
192 	u32 pwrctl;		/* corerev >= 2 */
193 	u32 PAD[133];
194 	u32 sr_control;     /* corerev >= 15 */
195 	u32 sr_status;      /* corerev >= 15 */
196 	u32 sr_address;     /* corerev >= 15 */
197 	u32 sr_data;        /* corerev >= 15 */
198 };
199 
200 #define SOCRAMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)
201 #define SYSMEMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)
202 
203 #define ARMCR4_CAP		(0x04)
204 #define ARMCR4_BANKIDX		(0x40)
205 #define ARMCR4_BANKINFO		(0x44)
206 #define ARMCR4_BANKPDA		(0x4C)
207 
208 #define	ARMCR4_TCBBNB_MASK	0xf0
209 #define	ARMCR4_TCBBNB_SHIFT	4
210 #define	ARMCR4_TCBANB_MASK	0xf
211 #define	ARMCR4_TCBANB_SHIFT	0
212 
213 #define	ARMCR4_BSZ_MASK		0x3f
214 #define	ARMCR4_BSZ_MULT		8192
215 
216 struct brcmf_core_priv {
217 	struct brcmf_core pub;
218 	u32 wrapbase;
219 	struct list_head list;
220 	struct brcmf_chip_priv *chip;
221 };
222 
223 struct brcmf_chip_priv {
224 	struct brcmf_chip pub;
225 	const struct brcmf_buscore_ops *ops;
226 	void *ctx;
227 	/* assured first core is chipcommon, second core is buscore */
228 	struct list_head cores;
229 	u16 num_cores;
230 
231 	bool (*iscoreup)(struct brcmf_core_priv *core);
232 	void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
233 			    u32 reset);
234 	void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
235 			  u32 postreset);
236 };
237 
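/* Read the revision of a Sonics backplane core from its sbidhigh register
 * and store it in the public core descriptor.
 */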
238 static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
239 				  struct brcmf_core *core)
240 {
241 	u32 regdata;
242 
243 	regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
244 	core->rev = SBCOREREV(regdata);
245 }
246 
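/* A Sonics backplane core is considered up when the clock enable bit is the
 * only one set among the reset, reject and clock bits of sbtmstatelow.
 */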
247 static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
248 {
249 	struct brcmf_chip_priv *ci;
250 	u32 regdata;
251 	u32 address;
252 
253 	ci = core->chip;
254 	address = CORE_SB(core->pub.base, sbtmstatelow);
255 	regdata = ci->ops->read32(ci->ctx, address);
256 	regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
257 		    SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
258 	return SSB_TMSLOW_CLOCK == regdata;
259 }
260 
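/* An AXI backplane core is considered up when its clock is enabled and not
 * force-gated in BCMA_IOCTL and reset is deasserted in BCMA_RESET_CTL.
 */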
261 static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
262 {
263 	struct brcmf_chip_priv *ci;
264 	u32 regdata;
265 	bool ret;
266 
267 	ci = core->chip;
268 	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
269 	ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
270 
271 	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
272 	ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
273 
274 	return ret;
275 }
276 
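/* Put a Sonics backplane core into reset.  If the core clock is running,
 * target reject (and initiator reject for initiator cores) is asserted
 * first so outstanding backplane transactions can drain.  The core is left
 * with reset and reject asserted; prereset/reset are unused on this bus.
 */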
277 static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
278 				      u32 prereset, u32 reset)
279 {
280 	struct brcmf_chip_priv *ci;
281 	u32 val, base;
282 
283 	ci = core->chip;
284 	base = core->pub.base;
285 	val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
286 	if (val & SSB_TMSLOW_RESET)
287 		return;
288 
289 	val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
290 	if ((val & SSB_TMSLOW_CLOCK) != 0) {
291 		/*
292 		 * set target reject and spin until busy is clear
293 		 * (preserve core-specific bits)
294 		 */
295 		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
296 		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
297 					 val | SSB_TMSLOW_REJECT);
298 
299 		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
300 		udelay(1);
301 		SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
302 			  & SSB_TMSHIGH_BUSY), 100000);
303 
304 		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
305 		if (val & SSB_TMSHIGH_BUSY)
306 			brcmf_err("core state still busy\n");
307 
308 		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
309 		if (val & SSB_IDLOW_INITIATOR) {
310 			val = ci->ops->read32(ci->ctx,
311 					      CORE_SB(base, sbimstate));
312 			val |= SSB_IMSTATE_REJECT;
313 			ci->ops->write32(ci->ctx,
314 					 CORE_SB(base, sbimstate), val);
315 			val = ci->ops->read32(ci->ctx,
316 					      CORE_SB(base, sbimstate));
317 			udelay(1);
318 			SPINWAIT((ci->ops->read32(ci->ctx,
319 						  CORE_SB(base, sbimstate)) &
320 				  SSB_IMSTATE_BUSY), 100000);
321 		}
322 
323 		/* set reset and reject while enabling the clocks */
324 		val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
325 		      SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
326 		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
327 		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
328 		udelay(10);
329 
330 		/* clear the initiator reject bit */
331 		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
332 		if (val & SSB_IDLOW_INITIATOR) {
333 			val = ci->ops->read32(ci->ctx,
334 					      CORE_SB(base, sbimstate));
335 			val &= ~SSB_IMSTATE_REJECT;
336 			ci->ops->write32(ci->ctx,
337 					 CORE_SB(base, sbimstate), val);
338 		}
339 	}
340 
341 	/* leave reset and reject asserted */
342 	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
343 			 (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
344 	udelay(1);
345 }
346 
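/* Put an AXI backplane core into reset.  The prereset flags are written to
 * BCMA_IOCTL (together with the clock and force-gated-clock bits) before
 * reset is asserted; the reset flags are written once the core is in reset.
 */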
347 static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
348 				      u32 prereset, u32 reset)
349 {
350 	struct brcmf_chip_priv *ci;
351 	u32 regdata;
352 
353 	ci = core->chip;
354 
355 	/* if core is already in reset, skip reset */
356 	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
357 	if ((regdata & BCMA_RESET_CTL_RESET) != 0)
358 		goto in_reset_configure;
359 
360 	/* configure reset */
361 	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
362 			 prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
363 	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
364 
365 	/* put in reset */
366 	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
367 			 BCMA_RESET_CTL_RESET);
368 	usleep_range(10, 20);
369 
370 	/* wait till reset is 1 */
371 	SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
372 		 BCMA_RESET_CTL_RESET, 300);
373 
374 in_reset_configure:
375 	/* in-reset configure */
376 	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
377 			 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
378 	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
379 }
380 
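/* Reset a Sonics backplane core: run the disable sequence first, re-enable
 * the clock with reset still asserted, clear pending backplane error state,
 * then release reset and finally leave only the clock enabled.
 */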
381 static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
382 				    u32 reset, u32 postreset)
383 {
384 	struct brcmf_chip_priv *ci;
385 	u32 regdata;
386 	u32 base;
387 
388 	ci = core->chip;
389 	base = core->pub.base;
390 	/*
391 	 * Must do the disable sequence first to work for
392 	 * arbitrary current core state.
393 	 */
394 	brcmf_chip_sb_coredisable(core, 0, 0);
395 
396 	/*
397 	 * Now do the initialization sequence.
398 	 * set reset while enabling the clock and
399 	 * forcing them on throughout the core
400 	 */
401 	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
402 			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
403 			 SSB_TMSLOW_RESET);
404 	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
405 	udelay(1);
406 
407 	/* clear any serror */
408 	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
409 	if (regdata & SSB_TMSHIGH_SERR)
410 		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);
411 
412 	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
413 	if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
414 		regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
415 		ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
416 	}
417 
418 	/* clear reset and allow it to propagate throughout the core */
419 	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
420 			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
421 	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
422 	udelay(1);
423 
424 	/* leave clock enabled */
425 	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
426 			 SSB_TMSLOW_CLOCK);
427 	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
428 	udelay(1);
429 }
430 
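/* Reset an AXI backplane core: disable it with the given prereset/reset
 * flags, release the reset (retrying while it remains asserted) and bring
 * the core up with the postreset flags plus the clock enable bit.
 */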
431 static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
432 				    u32 reset, u32 postreset)
433 {
434 	struct brcmf_chip_priv *ci;
435 	int count;
436 
437 	ci = core->chip;
438 
439 	/* must disable first to work for arbitrary current core state */
440 	brcmf_chip_ai_coredisable(core, prereset, reset);
441 
442 	count = 0;
443 	while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
444 	       BCMA_RESET_CTL_RESET) {
445 		ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
446 		count++;
447 		if (count > 50)
448 			break;
449 		usleep_range(40, 60);
450 	}
451 
452 	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
453 			 postreset | BCMA_IOCTL_CLK);
454 	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
455 }
456 
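/* Format a printable chip name into buf.  Chip ids below 0x4000 or above
 * 0xa000 are conventionally decimal (e.g. 43602), the others are printed
 * in hex (e.g. 4356).
 */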
457 char *brcmf_chip_name(u32 id, u32 rev, char *buf, uint len)
458 {
459 	const char *fmt;
460 
461 	fmt = ((id > 0xa000) || (id < 0x4000)) ? "BCM%d/%u" : "BCM%x/%u";
462 	snprintf(buf, len, fmt, id, rev);
463 	return buf;
464 }
465 
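/* Allocate a core descriptor and append it to the chip's core list. */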
466 static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
467 					      u16 coreid, u32 base,
468 					      u32 wrapbase)
469 {
470 	struct brcmf_core_priv *core;
471 
472 	core = kzalloc(sizeof(*core), GFP_KERNEL);
473 	if (!core)
474 		return ERR_PTR(-ENOMEM);
475 
476 	core->pub.id = coreid;
477 	core->pub.base = base;
478 	core->chip = ci;
479 	core->wrapbase = wrapbase;
480 
481 	list_add_tail(&core->list, &ci->cores);
482 	return &core->pub;
483 }
484 
485 /* safety check for chipinfo */
486 static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
487 {
488 	struct brcmf_core_priv *core;
489 	bool need_socram = false;
490 	bool has_socram = false;
491 	bool cpu_found = false;
492 	int idx = 1;
493 
494 	list_for_each_entry(core, &ci->cores, list) {
495 		brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
496 			  idx++, core->pub.id, core->pub.rev, core->pub.base,
497 			  core->wrapbase);
498 
499 		switch (core->pub.id) {
500 		case BCMA_CORE_ARM_CM3:
501 			cpu_found = true;
502 			need_socram = true;
503 			break;
504 		case BCMA_CORE_INTERNAL_MEM:
505 			has_socram = true;
506 			break;
507 		case BCMA_CORE_ARM_CR4:
508 			cpu_found = true;
509 			break;
510 		case BCMA_CORE_ARM_CA7:
511 			cpu_found = true;
512 			break;
513 		default:
514 			break;
515 		}
516 	}
517 
518 	if (!cpu_found) {
519 		brcmf_err("CPU core not detected\n");
520 		return -ENXIO;
521 	}
522 	/* check RAM core presence for ARM CM3 core */
523 	if (need_socram && !has_socram) {
524 		brcmf_err("RAM core not provided with ARM CM3 core\n");
525 		return -ENODEV;
526 	}
527 	return 0;
528 }
529 
530 static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg)
531 {
532 	return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg);
533 }
534 
535 static void brcmf_chip_core_write32(struct brcmf_core_priv *core,
536 				    u16 reg, u32 val)
537 {
538 	core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val);
539 }
540 
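/* Determine the size of SOCRAM bank idx and return true when the bank is
 * flagged as retention RAM in its bankinfo register.
 */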
541 static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx,
542 				       u32 *banksize)
543 {
544 	u32 bankinfo;
545 	u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
546 
547 	bankidx |= idx;
548 	brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx);
549 	bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo));
550 	*banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1;
551 	*banksize *= SOCRAM_BANKINFO_SZBASE;
552 	return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK);
553 }
554 
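/* Determine the total SOCRAM size and the portion available for
 * save/restore.  For corerev <= 7 and corerev 12 the size is derived from
 * the coreinfo register alone, otherwise each bank is queried individually.
 */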
555 static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
556 				      u32 *srsize)
557 {
558 	u32 coreinfo;
559 	uint nb, banksize, lss;
560 	bool retent;
561 	int i;
562 
563 	*ramsize = 0;
564 	*srsize = 0;
565 
566 	if (WARN_ON(sr->pub.rev < 4))
567 		return;
568 
569 	if (!brcmf_chip_iscoreup(&sr->pub))
570 		brcmf_chip_resetcore(&sr->pub, 0, 0, 0);
571 
572 	/* Get info for determining size */
573 	coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo));
574 	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
575 
576 	if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) {
577 		banksize = (coreinfo & SRCI_SRBSZ_MASK);
578 		lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
579 		if (lss != 0)
580 			nb--;
581 		*ramsize = nb * (1 << (banksize + SR_BSZ_BASE));
582 		if (lss != 0)
583 			*ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
584 	} else {
		/* length of SRAM banks increased for corerev 23 and later */
586 		if (sr->pub.rev >= 23) {
587 			nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT))
588 				>> SRCI_SRNB_SHIFT;
589 		} else {
590 			nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
591 		}
592 		for (i = 0; i < nb; i++) {
593 			retent = brcmf_chip_socram_banksize(sr, i, &banksize);
594 			*ramsize += banksize;
595 			if (retent)
596 				*srsize += banksize;
597 		}
598 	}
599 
600 	/* hardcoded save&restore memory sizes */
601 	switch (sr->chip->pub.chip) {
602 	case BRCM_CC_4334_CHIP_ID:
603 		if (sr->chip->pub.chiprev < 2)
604 			*srsize = (32 * 1024);
605 		break;
606 	case BRCM_CC_43430_CHIP_ID:
		/* assume SR capability for now as we cannot check
		 * the firmware SR capability at this point.
609 		 */
610 		*srsize = (64 * 1024);
611 		break;
612 	default:
613 		break;
614 	}
615 }
616 
617 /** Return the SYS MEM size */
618 static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem)
619 {
620 	u32 memsize = 0;
621 	u32 coreinfo;
622 	u32 idx;
623 	u32 nb;
624 	u32 banksize;
625 
626 	if (!brcmf_chip_iscoreup(&sysmem->pub))
627 		brcmf_chip_resetcore(&sysmem->pub, 0, 0, 0);
628 
629 	coreinfo = brcmf_chip_core_read32(sysmem, SYSMEMREGOFFS(coreinfo));
630 	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
631 
632 	for (idx = 0; idx < nb; idx++) {
633 		brcmf_chip_socram_banksize(sysmem, idx, &banksize);
634 		memsize += banksize;
635 	}
636 
637 	return memsize;
638 }
639 
640 /** Return the TCM-RAM size of the ARMCR4 core. */
641 static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
642 {
643 	u32 corecap;
644 	u32 memsize = 0;
645 	u32 nab;
646 	u32 nbb;
647 	u32 totb;
648 	u32 bxinfo;
649 	u32 idx;
650 
651 	corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);
652 
653 	nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
654 	nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
655 	totb = nab + nbb;
656 
657 	for (idx = 0; idx < totb; idx++) {
658 		brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
659 		bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
660 		memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
661 	}
662 
663 	return memsize;
664 }
665 
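/* Return the chip specific RAM base address used with the ARM CR4 (TCM)
 * and CA7 (SYS MEM) cores.  Unknown chips return 0, which callers treat as
 * an error.
 */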
666 static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
667 {
668 	switch (ci->pub.chip) {
669 	case BRCM_CC_4345_CHIP_ID:
670 		return 0x198000;
671 	case BRCM_CC_4335_CHIP_ID:
672 	case BRCM_CC_4339_CHIP_ID:
673 	case BRCM_CC_4350_CHIP_ID:
674 	case BRCM_CC_4354_CHIP_ID:
675 	case BRCM_CC_4356_CHIP_ID:
676 	case BRCM_CC_43567_CHIP_ID:
677 	case BRCM_CC_43569_CHIP_ID:
678 	case BRCM_CC_43570_CHIP_ID:
679 	case BRCM_CC_4358_CHIP_ID:
680 	case BRCM_CC_4359_CHIP_ID:
681 	case BRCM_CC_43602_CHIP_ID:
682 	case BRCM_CC_4371_CHIP_ID:
683 		return 0x180000;
684 	case BRCM_CC_43465_CHIP_ID:
685 	case BRCM_CC_43525_CHIP_ID:
686 	case BRCM_CC_4365_CHIP_ID:
687 	case BRCM_CC_4366_CHIP_ID:
688 	case BRCM_CC_43664_CHIP_ID:
689 		return 0x200000;
690 	case CY_CC_4373_CHIP_ID:
691 		return 0x160000;
692 	default:
693 		brcmf_err("unknown chip: %s\n", ci->pub.name);
694 		break;
695 	}
696 	return 0;
697 }
698 
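/* Determine RAM base, RAM size and save/restore size of the chip, taken
 * from the ARM CR4 TCM, the SYS MEM core or the SOCRAM core, whichever is
 * present.
 */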
699 int brcmf_chip_get_raminfo(struct brcmf_chip *pub)
700 {
701 	struct brcmf_chip_priv *ci = container_of(pub, struct brcmf_chip_priv,
702 						  pub);
703 	struct brcmf_core_priv *mem_core;
704 	struct brcmf_core *mem;
705 
706 	mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
707 	if (mem) {
708 		mem_core = container_of(mem, struct brcmf_core_priv, pub);
709 		ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
710 		ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
711 		if (!ci->pub.rambase) {
712 			brcmf_err("RAM base not provided with ARM CR4 core\n");
713 			return -EINVAL;
714 		}
715 	} else {
716 		mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_SYS_MEM);
717 		if (mem) {
718 			mem_core = container_of(mem, struct brcmf_core_priv,
719 						pub);
720 			ci->pub.ramsize = brcmf_chip_sysmem_ramsize(mem_core);
721 			ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
722 			if (!ci->pub.rambase) {
723 				brcmf_err("RAM base not provided with ARM CA7 core\n");
724 				return -EINVAL;
725 			}
726 		} else {
727 			mem = brcmf_chip_get_core(&ci->pub,
728 						  BCMA_CORE_INTERNAL_MEM);
729 			if (!mem) {
730 				brcmf_err("No memory cores found\n");
731 				return -ENOMEM;
732 			}
733 			mem_core = container_of(mem, struct brcmf_core_priv,
734 						pub);
735 			brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
736 						  &ci->pub.srsize);
737 		}
738 	}
739 	brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
740 		  ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
741 		  ci->pub.srsize, ci->pub.srsize);
742 
743 	if (!ci->pub.ramsize) {
744 		brcmf_err("RAM size is undetermined\n");
745 		return -ENOMEM;
746 	}
747 
748 	if (ci->pub.ramsize > BRCMF_CHIP_MAX_MEMSIZE) {
749 		brcmf_err("RAM size is incorrect\n");
750 		return -ENOMEM;
751 	}
752 
753 	return 0;
754 }
755 
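/* Read the next 32-bit descriptor from the enumeration ROM and advance
 * eromaddr.  When type is non-NULL it receives the descriptor type; 64-bit
 * address descriptors are reported as plain DMP_DESC_ADDRESS.
 */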
756 static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
757 				   u8 *type)
758 {
759 	u32 val;
760 
761 	/* read next descriptor */
762 	val = ci->ops->read32(ci->ctx, *eromaddr);
763 	*eromaddr += 4;
764 
765 	if (!type)
766 		return val;
767 
768 	/* determine descriptor type */
769 	*type = (val & DMP_DESC_TYPE_MSK);
770 	if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
771 		*type = DMP_DESC_ADDRESS;
772 
773 	return val;
774 }
775 
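/* Parse the address descriptors that follow a component descriptor and
 * return the core register base and wrapper base.  Parsing stops at the
 * next component descriptor or, with an error, at the end of the table.
 */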
776 static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
777 				      u32 *regbase, u32 *wrapbase)
778 {
779 	u8 desc;
780 	u32 val, szdesc;
781 	u8 stype, sztype, wraptype;
782 
783 	*regbase = 0;
784 	*wrapbase = 0;
785 
786 	val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
787 	if (desc == DMP_DESC_MASTER_PORT) {
788 		wraptype = DMP_SLAVE_TYPE_MWRAP;
789 	} else if (desc == DMP_DESC_ADDRESS) {
		/* rewind so the address descriptor is re-read below */
791 		*eromaddr -= 4;
792 		wraptype = DMP_SLAVE_TYPE_SWRAP;
793 	} else {
794 		*eromaddr -= 4;
795 		return -EILSEQ;
796 	}
797 
798 	do {
799 		/* locate address descriptor */
800 		do {
801 			val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
802 			/* unexpected table end */
803 			if (desc == DMP_DESC_EOT) {
804 				*eromaddr -= 4;
805 				return -EFAULT;
806 			}
807 		} while (desc != DMP_DESC_ADDRESS &&
808 			 desc != DMP_DESC_COMPONENT);
809 
810 		/* stop if we crossed current component border */
811 		if (desc == DMP_DESC_COMPONENT) {
812 			*eromaddr -= 4;
813 			return 0;
814 		}
815 
816 		/* skip upper 32-bit address descriptor */
817 		if (val & DMP_DESC_ADDRSIZE_GT32)
818 			brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
819 
820 		sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;
821 
822 		/* next size descriptor can be skipped */
823 		if (sztype == DMP_SLAVE_SIZE_DESC) {
824 			szdesc = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
825 			/* skip upper size descriptor if present */
826 			if (szdesc & DMP_DESC_ADDRSIZE_GT32)
827 				brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
828 		}
829 
830 		/* look for 4K or 8K register regions */
831 		if (sztype != DMP_SLAVE_SIZE_4K &&
832 		    sztype != DMP_SLAVE_SIZE_8K)
833 			continue;
834 
835 		stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
836 
837 		/* only regular slave and wrapper */
838 		if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
839 			*regbase = val & DMP_SLAVE_ADDR_BASE;
840 		if (*wrapbase == 0 && stype == wraptype)
841 			*wrapbase = val & DMP_SLAVE_ADDR_BASE;
842 	} while (*regbase == 0 || *wrapbase == 0);
843 
844 	return 0;
845 }
846 
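/* Walk the enumeration ROM of an AXI (DMP) chip and add a core descriptor
 * for every component that provides usable register and wrapper addresses.
 */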
847 static
848 int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
849 {
850 	struct brcmf_core *core;
851 	u32 eromaddr;
852 	u8 desc_type = 0;
853 	u32 val;
854 	u16 id;
855 	u8 nmw, nsw, rev;
856 	u32 base, wrap;
857 	int err;
858 
859 	eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));
860 
861 	while (desc_type != DMP_DESC_EOT) {
862 		val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
863 		if (!(val & DMP_DESC_VALID))
864 			continue;
865 
866 		if (desc_type == DMP_DESC_EMPTY)
867 			continue;
868 
869 		/* need a component descriptor */
870 		if (desc_type != DMP_DESC_COMPONENT)
871 			continue;
872 
873 		id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;
874 
875 		/* next descriptor must be component as well */
876 		val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
877 		if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
878 			return -EFAULT;
879 
880 		/* only look at cores with master port(s) */
881 		nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
882 		nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
883 		rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
884 
885 		/* need core with ports */
886 		if (nmw + nsw == 0 &&
887 		    id != BCMA_CORE_PMU &&
888 		    id != BCMA_CORE_GCI)
889 			continue;
890 
891 		/* try to obtain register address info */
892 		err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
893 		if (err)
894 			continue;
895 
896 		/* finally a core to be added */
897 		core = brcmf_chip_add_core(ci, id, base, wrap);
898 		if (IS_ERR(core))
899 			return PTR_ERR(core);
900 
901 		core->rev = rev;
902 	}
903 
904 	return 0;
905 }
906 
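/* Identify the chip: read the chipid register, select the backplane
 * specific callbacks, enumerate the cores and determine the RAM layout.
 */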
907 static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
908 {
909 	struct brcmf_core *core;
910 	u32 regdata;
911 	u32 socitype;
912 	int ret;
913 
914 	/* Get CC core rev
	 * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
	 * For different chiptypes or old SDIO hosts w/o chipcommon,
917 	 * other ways of recognition should be added here.
918 	 */
919 	regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
920 	ci->pub.chip = regdata & CID_ID_MASK;
921 	ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
922 	socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
923 
924 	brcmf_chip_name(ci->pub.chip, ci->pub.chiprev,
925 			ci->pub.name, sizeof(ci->pub.name));
926 	brcmf_dbg(INFO, "found %s chip: %s\n",
927 		  socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name);
928 
929 	if (socitype == SOCI_SB) {
930 		if (ci->pub.chip != BRCM_CC_4329_CHIP_ID) {
931 			brcmf_err("SB chip is not supported\n");
932 			return -ENODEV;
933 		}
934 		ci->iscoreup = brcmf_chip_sb_iscoreup;
935 		ci->coredisable = brcmf_chip_sb_coredisable;
936 		ci->resetcore = brcmf_chip_sb_resetcore;
937 
938 		core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
939 					   SI_ENUM_BASE, 0);
940 		brcmf_chip_sb_corerev(ci, core);
941 		core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
942 					   BCM4329_CORE_BUS_BASE, 0);
943 		brcmf_chip_sb_corerev(ci, core);
944 		core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
945 					   BCM4329_CORE_SOCRAM_BASE, 0);
946 		brcmf_chip_sb_corerev(ci, core);
947 		core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
948 					   BCM4329_CORE_ARM_BASE, 0);
949 		brcmf_chip_sb_corerev(ci, core);
950 
951 		core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
952 		brcmf_chip_sb_corerev(ci, core);
953 	} else if (socitype == SOCI_AI) {
954 		ci->iscoreup = brcmf_chip_ai_iscoreup;
955 		ci->coredisable = brcmf_chip_ai_coredisable;
956 		ci->resetcore = brcmf_chip_ai_resetcore;
957 
958 		brcmf_chip_dmp_erom_scan(ci);
959 	} else {
960 		brcmf_err("chip backplane type %u is not supported\n",
961 			  socitype);
962 		return -ENODEV;
963 	}
964 
965 	ret = brcmf_chip_cores_check(ci);
966 	if (ret)
967 		return ret;
968 
969 	/* assure chip is passive for core access */
970 	brcmf_chip_set_passive(&ci->pub);
971 
972 	/* Call bus specific reset function now. Cores have been determined
973 	 * but further access may require a chip specific reset at this point.
974 	 */
975 	if (ci->ops->reset) {
976 		ci->ops->reset(ci->ctx, &ci->pub);
977 		brcmf_chip_set_passive(&ci->pub);
978 	}
979 
980 	return brcmf_chip_get_raminfo(&ci->pub);
981 }
982 
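/* Halt the on-chip ARM processor.  A CM3 core is simply disabled; CR4 and
 * CA7 cores are reset with only the CPU halt bit kept set in BCMA_IOCTL.
 */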
983 static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
984 {
985 	struct brcmf_core *core;
986 	struct brcmf_core_priv *cpu;
987 	u32 val;
988 
989 
990 	core = brcmf_chip_get_core(&chip->pub, id);
991 	if (!core)
992 		return;
993 
994 	switch (id) {
995 	case BCMA_CORE_ARM_CM3:
996 		brcmf_chip_coredisable(core, 0, 0);
997 		break;
998 	case BCMA_CORE_ARM_CR4:
999 	case BCMA_CORE_ARM_CA7:
1000 		cpu = container_of(core, struct brcmf_core_priv, pub);
1001 
1002 		/* clear all IOCTL bits except HALT bit */
1003 		val = chip->ops->read32(chip->ctx, cpu->wrapbase + BCMA_IOCTL);
1004 		val &= ARMCR4_BCMA_IOCTL_CPUHALT;
1005 		brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
1006 				     ARMCR4_BCMA_IOCTL_CPUHALT);
1007 		break;
1008 	default:
1009 		brcmf_err("unknown id: %u\n", id);
1010 		break;
1011 	}
1012 }
1013 
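/* Read the chipcommon and PMU capabilities and run the optional bus
 * specific setup callback.
 */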
1014 static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
1015 {
1016 	struct brcmf_chip *pub;
1017 	struct brcmf_core_priv *cc;
1018 	struct brcmf_core *pmu;
1019 	u32 base;
1020 	u32 val;
1021 	int ret = 0;
1022 
1023 	pub = &chip->pub;
1024 	cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
1025 	base = cc->pub.base;
1026 
	/* get chipcommon capabilities */
1028 	pub->cc_caps = chip->ops->read32(chip->ctx,
1029 					 CORE_CC_REG(base, capabilities));
1030 	pub->cc_caps_ext = chip->ops->read32(chip->ctx,
1031 					     CORE_CC_REG(base,
1032 							 capabilities_ext));
1033 
1034 	/* get pmu caps & rev */
1035 	pmu = brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */
1036 	if (pub->cc_caps & CC_CAP_PMU) {
1037 		val = chip->ops->read32(chip->ctx,
1038 					CORE_CC_REG(pmu->base, pmucapabilities));
1039 		pub->pmurev = val & PCAP_REV_MASK;
1040 		pub->pmucaps = val;
1041 	}
1042 
1043 	brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
1044 		  cc->pub.rev, pub->pmurev, pub->pmucaps);
1045 
1046 	/* execute bus core specific setup */
1047 	if (chip->ops->setup)
1048 		ret = chip->ops->setup(chip->ctx, pub);
1049 
1050 	return ret;
1051 }
1052 
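/* Attach chip handling to a bus.  The ops callbacks provide register access
 * and bus specific hooks; read32, write32, prepare and activate are
 * mandatory.  Returns an ERR_PTR() on failure.
 */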
1053 struct brcmf_chip *brcmf_chip_attach(void *ctx,
1054 				     const struct brcmf_buscore_ops *ops)
1055 {
1056 	struct brcmf_chip_priv *chip;
1057 	int err = 0;
1058 
1059 	if (WARN_ON(!ops->read32))
1060 		err = -EINVAL;
1061 	if (WARN_ON(!ops->write32))
1062 		err = -EINVAL;
1063 	if (WARN_ON(!ops->prepare))
1064 		err = -EINVAL;
1065 	if (WARN_ON(!ops->activate))
1066 		err = -EINVAL;
1067 	if (err < 0)
1068 		return ERR_PTR(-EINVAL);
1069 
1070 	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
1071 	if (!chip)
1072 		return ERR_PTR(-ENOMEM);
1073 
1074 	INIT_LIST_HEAD(&chip->cores);
1075 	chip->num_cores = 0;
1076 	chip->ops = ops;
1077 	chip->ctx = ctx;
1078 
1079 	err = ops->prepare(ctx);
1080 	if (err < 0)
1081 		goto fail;
1082 
1083 	err = brcmf_chip_recognition(chip);
1084 	if (err < 0)
1085 		goto fail;
1086 
1087 	err = brcmf_chip_setup(chip);
1088 	if (err < 0)
1089 		goto fail;
1090 
1091 	return &chip->pub;
1092 
1093 fail:
1094 	brcmf_chip_detach(&chip->pub);
1095 	return ERR_PTR(err);
1096 }
1097 
1098 void brcmf_chip_detach(struct brcmf_chip *pub)
1099 {
1100 	struct brcmf_chip_priv *chip;
1101 	struct brcmf_core_priv *core;
1102 	struct brcmf_core_priv *tmp;
1103 
1104 	chip = container_of(pub, struct brcmf_chip_priv, pub);
1105 	list_for_each_entry_safe(core, tmp, &chip->cores, list) {
1106 		list_del(&core->list);
1107 		kfree(core);
1108 	}
1109 	kfree(chip);
1110 }
1111 
1112 struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
1113 {
1114 	struct brcmf_chip_priv *chip;
1115 	struct brcmf_core_priv *core;
1116 
1117 	chip = container_of(pub, struct brcmf_chip_priv, pub);
1118 	list_for_each_entry(core, &chip->cores, list)
1119 		if (core->pub.id == coreid)
1120 			return &core->pub;
1121 
1122 	return NULL;
1123 }
1124 
1125 struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
1126 {
1127 	struct brcmf_chip_priv *chip;
1128 	struct brcmf_core_priv *cc;
1129 
1130 	chip = container_of(pub, struct brcmf_chip_priv, pub);
1131 	cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
1132 	if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
1133 		return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
1134 	return &cc->pub;
1135 }
1136 
1137 struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub)
1138 {
1139 	struct brcmf_core *cc = brcmf_chip_get_chipcommon(pub);
1140 	struct brcmf_core *pmu;
1141 
	/* See if there is a separate PMU core available */
1143 	if (cc->rev >= 35 &&
1144 	    pub->cc_caps_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
1145 		pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU);
1146 		if (pmu)
1147 			return pmu;
1148 	}
1149 
1150 	/* Fallback to ChipCommon core for older hardware */
1151 	return cc;
1152 }
1153 
1154 bool brcmf_chip_iscoreup(struct brcmf_core *pub)
1155 {
1156 	struct brcmf_core_priv *core;
1157 
1158 	core = container_of(pub, struct brcmf_core_priv, pub);
1159 	return core->chip->iscoreup(core);
1160 }
1161 
1162 void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
1163 {
1164 	struct brcmf_core_priv *core;
1165 
1166 	core = container_of(pub, struct brcmf_core_priv, pub);
1167 	core->chip->coredisable(core, prereset, reset);
1168 }
1169 
1170 void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
1171 			  u32 postreset)
1172 {
1173 	struct brcmf_core_priv *core;
1174 
1175 	core = container_of(pub, struct brcmf_core_priv, pub);
1176 	core->chip->resetcore(core, prereset, reset, postreset);
1177 }
1178 
1179 static void
1180 brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
1181 {
1182 	struct brcmf_core *core;
1183 	struct brcmf_core_priv *sr;
1184 
1185 	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
1186 	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
1187 	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
1188 				   D11_BCMA_IOCTL_PHYCLOCKEN,
1189 			     D11_BCMA_IOCTL_PHYCLOCKEN,
1190 			     D11_BCMA_IOCTL_PHYCLOCKEN);
1191 	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
1192 	brcmf_chip_resetcore(core, 0, 0, 0);
1193 
1194 	/* disable bank #3 remap for this device */
1195 	if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
1196 		sr = container_of(core, struct brcmf_core_priv, pub);
1197 		brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
1198 		brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
1199 	}
1200 }
1201 
1202 static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
1203 {
1204 	struct brcmf_core *core;
1205 
1206 	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
1207 	if (!brcmf_chip_iscoreup(core)) {
1208 		brcmf_err("SOCRAM core is down after reset?\n");
1209 		return false;
1210 	}
1211 
1212 	chip->ops->activate(chip->ctx, &chip->pub, 0);
1213 
1214 	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
1215 	brcmf_chip_resetcore(core, 0, 0, 0);
1216 
1217 	return true;
1218 }
1219 
1220 static inline void
1221 brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
1222 {
1223 	struct brcmf_core *core;
1224 
1225 	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
1226 
1227 	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
1228 	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
1229 				   D11_BCMA_IOCTL_PHYCLOCKEN,
1230 			     D11_BCMA_IOCTL_PHYCLOCKEN,
1231 			     D11_BCMA_IOCTL_PHYCLOCKEN);
1232 }
1233 
1234 static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
1235 {
1236 	struct brcmf_core *core;
1237 
1238 	chip->ops->activate(chip->ctx, &chip->pub, rstvec);
1239 
1240 	/* restore ARM */
1241 	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
1242 	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
1243 
1244 	return true;
1245 }
1246 
1247 static inline void
1248 brcmf_chip_ca7_set_passive(struct brcmf_chip_priv *chip)
1249 {
1250 	struct brcmf_core *core;
1251 
1252 	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CA7);
1253 
1254 	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
1255 	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
1256 				   D11_BCMA_IOCTL_PHYCLOCKEN,
1257 			     D11_BCMA_IOCTL_PHYCLOCKEN,
1258 			     D11_BCMA_IOCTL_PHYCLOCKEN);
1259 }
1260 
1261 static bool brcmf_chip_ca7_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
1262 {
1263 	struct brcmf_core *core;
1264 
1265 	chip->ops->activate(chip->ctx, &chip->pub, rstvec);
1266 
1267 	/* restore ARM */
1268 	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CA7);
1269 	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
1270 
1271 	return true;
1272 }
1273 
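/* Put the chip in a passive state: the ARM core is halted and the D11 core
 * (and, for CM3 based chips, the SOCRAM core) is reset so the host can
 * safely access core registers and RAM.
 */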
1274 void brcmf_chip_set_passive(struct brcmf_chip *pub)
1275 {
1276 	struct brcmf_chip_priv *chip;
1277 	struct brcmf_core *arm;
1278 
1279 	brcmf_dbg(TRACE, "Enter\n");
1280 
1281 	chip = container_of(pub, struct brcmf_chip_priv, pub);
1282 	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
1283 	if (arm) {
1284 		brcmf_chip_cr4_set_passive(chip);
1285 		return;
1286 	}
1287 	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
1288 	if (arm) {
1289 		brcmf_chip_ca7_set_passive(chip);
1290 		return;
1291 	}
1292 	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
1293 	if (arm) {
1294 		brcmf_chip_cm3_set_passive(chip);
1295 		return;
1296 	}
1297 }
1298 
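/* Bring the chip out of the passive state: the bus activate callback is
 * invoked and the ARM core is released from reset so the downloaded
 * firmware starts executing.  rstvec is the firmware reset vector passed
 * to the activate callback for CR4/CA7 chips.
 */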
1299 bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
1300 {
1301 	struct brcmf_chip_priv *chip;
1302 	struct brcmf_core *arm;
1303 
1304 	brcmf_dbg(TRACE, "Enter\n");
1305 
1306 	chip = container_of(pub, struct brcmf_chip_priv, pub);
1307 	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
1308 	if (arm)
1309 		return brcmf_chip_cr4_set_active(chip, rstvec);
1310 	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
1311 	if (arm)
1312 		return brcmf_chip_ca7_set_active(chip, rstvec);
1313 	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
1314 	if (arm)
1315 		return brcmf_chip_cm3_set_active(chip);
1316 
1317 	return false;
1318 }
1319 
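/* Check whether the chip supports the save/restore (SR) power save mode.
 * The check is chip specific: some chips expose an SR engine enable bit,
 * others are judged from PMU capability and retention control registers.
 */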
1320 bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
1321 {
1322 	u32 base, addr, reg, pmu_cc3_mask = ~0;
1323 	struct brcmf_chip_priv *chip;
1324 	struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);
1325 
1326 	brcmf_dbg(TRACE, "Enter\n");
1327 
	/* old chips with PMU revision less than 17 don't support save/restore */
1329 	if (pub->pmurev < 17)
1330 		return false;
1331 
1332 	base = brcmf_chip_get_chipcommon(pub)->base;
1333 	chip = container_of(pub, struct brcmf_chip_priv, pub);
1334 
1335 	switch (pub->chip) {
1336 	case BRCM_CC_4354_CHIP_ID:
1337 	case BRCM_CC_4356_CHIP_ID:
1338 	case BRCM_CC_4345_CHIP_ID:
1339 		/* explicitly check SR engine enable bit */
1340 		pmu_cc3_mask = BIT(2);
1341 		/* fall-through */
1342 	case BRCM_CC_43241_CHIP_ID:
1343 	case BRCM_CC_4335_CHIP_ID:
1344 	case BRCM_CC_4339_CHIP_ID:
1345 		/* read PMU chipcontrol register 3 */
1346 		addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
1347 		chip->ops->write32(chip->ctx, addr, 3);
1348 		addr = CORE_CC_REG(pmu->base, chipcontrol_data);
1349 		reg = chip->ops->read32(chip->ctx, addr);
1350 		return (reg & pmu_cc3_mask) != 0;
1351 	case BRCM_CC_43430_CHIP_ID:
1352 		addr = CORE_CC_REG(base, sr_control1);
1353 		reg = chip->ops->read32(chip->ctx, addr);
1354 		return reg != 0;
1355 	case CY_CC_4373_CHIP_ID:
1356 		/* explicitly check SR engine enable bit */
1357 		addr = CORE_CC_REG(base, sr_control0);
1358 		reg = chip->ops->read32(chip->ctx, addr);
1359 		return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
1360 	case CY_CC_43012_CHIP_ID:
1361 		addr = CORE_CC_REG(pmu->base, retention_ctl);
1362 		reg = chip->ops->read32(chip->ctx, addr);
1363 		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
1364 			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
1365 	default:
1366 		addr = CORE_CC_REG(pmu->base, pmucapabilities_ext);
1367 		reg = chip->ops->read32(chip->ctx, addr);
1368 		if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
1369 			return false;
1370 
1371 		addr = CORE_CC_REG(pmu->base, retention_ctl);
1372 		reg = chip->ops->read32(chip->ctx, addr);
1373 		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
1374 			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
1375 	}
1376 }
1377