xref: /linux/drivers/remoteproc/qcom_q6v5_mss.c (revision d6fd48ef)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm self-authenticating modem subsystem remoteproc driver
4  *
5  * Copyright (C) 2016 Linaro Ltd.
6  * Copyright (C) 2014 Sony Mobile Communications AB
7  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/devcoredump.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/mfd/syscon.h>
17 #include <linux/module.h>
18 #include <linux/of_address.h>
19 #include <linux/of_device.h>
20 #include <linux/of_reserved_mem.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_domain.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/regmap.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/remoteproc.h>
27 #include <linux/reset.h>
28 #include <linux/soc/qcom/mdt_loader.h>
29 #include <linux/iopoll.h>
30 #include <linux/slab.h>
31 
32 #include "remoteproc_internal.h"
33 #include "qcom_common.h"
34 #include "qcom_pil_info.h"
35 #include "qcom_q6v5.h"
36 
37 #include <linux/firmware/qcom/qcom_scm.h>
38 
39 #define MPSS_CRASH_REASON_SMEM		421
40 
41 #define MBA_LOG_SIZE			SZ_4K
42 
43 #define MPSS_PAS_ID			5
44 
45 /* RMB Status Register Values */
46 #define RMB_PBL_SUCCESS			0x1
47 
48 #define RMB_MBA_XPU_UNLOCKED		0x1
49 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
50 #define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
51 #define RMB_MBA_AUTH_COMPLETE		0x4
52 
53 /* PBL/MBA interface registers */
54 #define RMB_MBA_IMAGE_REG		0x00
55 #define RMB_PBL_STATUS_REG		0x04
56 #define RMB_MBA_COMMAND_REG		0x08
57 #define RMB_MBA_STATUS_REG		0x0C
58 #define RMB_PMI_META_DATA_REG		0x10
59 #define RMB_PMI_CODE_START_REG		0x14
60 #define RMB_PMI_CODE_LENGTH_REG		0x18
61 #define RMB_MBA_MSS_STATUS		0x40
62 #define RMB_MBA_ALT_RESET		0x44
63 
64 #define RMB_CMD_META_DATA_READY		0x1
65 #define RMB_CMD_LOAD_READY		0x2
66 
67 /* QDSP6SS Register Offsets */
68 #define QDSP6SS_RESET_REG		0x014
69 #define QDSP6SS_GFMUX_CTL_REG		0x020
70 #define QDSP6SS_PWR_CTL_REG		0x030
71 #define QDSP6SS_MEM_PWR_CTL		0x0B0
72 #define QDSP6V6SS_MEM_PWR_CTL		0x034
73 #define QDSP6SS_STRAP_ACC		0x110
74 
75 /* AXI Halt Register Offsets */
76 #define AXI_HALTREQ_REG			0x0
77 #define AXI_HALTACK_REG			0x4
78 #define AXI_IDLE_REG			0x8
79 #define AXI_GATING_VALID_OVERRIDE	BIT(0)
80 
81 #define HALT_ACK_TIMEOUT_US		100000
82 
83 /* QACCEPT Register Offsets */
84 #define QACCEPT_ACCEPT_REG		0x0
85 #define QACCEPT_ACTIVE_REG		0x4
86 #define QACCEPT_DENY_REG		0x8
87 #define QACCEPT_REQ_REG			0xC
88 
89 #define QACCEPT_TIMEOUT_US		50
90 
91 /* QDSP6SS_RESET */
92 #define Q6SS_STOP_CORE			BIT(0)
93 #define Q6SS_CORE_ARES			BIT(1)
94 #define Q6SS_BUS_ARES_ENABLE		BIT(2)
95 
96 /* QDSP6SS CBCR */
97 #define Q6SS_CBCR_CLKEN			BIT(0)
98 #define Q6SS_CBCR_CLKOFF		BIT(31)
99 #define Q6SS_CBCR_TIMEOUT_US		200
100 
101 /* QDSP6SS_GFMUX_CTL */
102 #define Q6SS_CLK_ENABLE			BIT(1)
103 
104 /* QDSP6SS_PWR_CTL */
105 #define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
106 #define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
107 #define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
108 #define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
109 #define Q6SS_ETB_SLP_NRET_N		BIT(17)
110 #define Q6SS_L2DATA_STBY_N		BIT(18)
111 #define Q6SS_SLP_RET_N			BIT(19)
112 #define Q6SS_CLAMP_IO			BIT(20)
113 #define QDSS_BHS_ON			BIT(21)
114 #define QDSS_LDO_BYP			BIT(22)
115 
116 /* QDSP6v55 parameters */
117 #define QDSP6V55_MEM_BITS		GENMASK(16, 8)
118 
119 /* QDSP6v56 parameters */
120 #define QDSP6v56_LDO_BYP		BIT(25)
121 #define QDSP6v56_BHS_ON		BIT(24)
122 #define QDSP6v56_CLAMP_WL		BIT(21)
123 #define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
124 #define QDSP6SS_XO_CBCR		0x0038
125 #define QDSP6SS_ACC_OVERRIDE_VAL		0x20
126 
127 /* QDSP6v65 parameters */
128 #define QDSP6SS_CORE_CBCR		0x20
129 #define QDSP6SS_SLEEP                   0x3C
130 #define QDSP6SS_BOOT_CORE_START         0x400
131 #define QDSP6SS_BOOT_CMD                0x404
132 #define BOOT_FSM_TIMEOUT                10000
133 
/* Run-time state for one regulator supply: the handle plus the votes to cast. */
struct reg_info {
	struct regulator *reg;	/* handle obtained via devm_regulator_get() */
	int uV;			/* voltage vote; values <= 0 mean "no voltage vote" */
	int uA;			/* load vote; values <= 0 mean "no load vote" */
};
139 
/*
 * Static, per-SoC description of one required regulator supply.
 * Arrays of these are terminated by an entry with a NULL .supply
 * (see q6v5_regulator_init()).
 */
struct qcom_mss_reg_res {
	const char *supply;	/* supply name passed to devm_regulator_get() */
	int uV;			/* voltage to request; <= 0 to skip the vote */
	int uA;			/* load to request; <= 0 to skip the vote */
};
145 
/*
 * Per-SoC match data for one Hexagon/MSS variant: firmware name, supplies,
 * clocks, power domains and the feature flags that select the boot/reset
 * sequences used by the driver.
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;			/* default MBA firmware name */
	struct qcom_mss_reg_res *proxy_supply;		/* NULL-.supply-terminated */
	struct qcom_mss_reg_res *fallback_proxy_supply;	/* used when proxy PDs are absent */
	struct qcom_mss_reg_res *active_supply;
	/* NOTE(review): clk/pd name arrays appear to be NULL-terminated like the
	 * supply arrays — the parsing code is outside this chunk, confirm there. */
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **proxy_pd_names;
	int version;			/* MSS_* enum value */
	bool need_mem_protection;	/* carveouts handed over via SCM assign */
	bool has_alt_reset;		/* use RMB_MBA_ALT_RESET based reset */
	bool has_mba_logs;		/* MBA log region can be core-dumped */
	bool has_spare_reg;		/* AXI_GATING_VALID_OVERRIDE workaround */
	bool has_qaccept_regs;		/* Q-channel handshake registers present */
	bool has_ext_cntl_regs;		/* external clock/RSCC control registers */
	bool has_vq6;			/* separate vQ6 halt register group */
};
164 
/*
 * struct q6v5 - run-time state for one self-authenticating modem (MSS)
 *
 * Collects the MMIO bases, syscon handles, resets, clocks, regulators and
 * power domains used to boot the Hexagon core, the carveout layout (MBA,
 * optional metadata and MPSS regions) and the per-SoC feature flags copied
 * from the match data.
 */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS register block */
	void __iomem *rmb_base;		/* PBL/MBA RMB interface registers */

	struct regmap *halt_map;	/* syscon holding AXI halt/QACCEPT regs */
	struct regmap *conn_map;	/* syscon for AXI gating / ext control regs */

	/* Offsets into halt_map of the per-port AXI halt register groups */
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 halt_vq6;
	u32 conn_box;			/* offset into conn_map (gating override) */

	/* Offsets into halt_map of the Q-channel handshake register groups */
	u32 qaccept_mdm;
	u32 qaccept_cx;
	u32 qaccept_axi;

	/* Offsets used on variants with has_ext_cntl_regs */
	u32 axim1_clk_off;
	u32 crypto_clk_off;
	u32 force_clk_on;
	u32 rscc_disable;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[1];
	struct reg_info fallback_proxy_regs[2];
	int active_reg_count;
	int proxy_reg_count;
	int fallback_proxy_reg_count;

	bool dump_mba_loaded;
	size_t current_dump_size;
	size_t total_dump_size;

	/* MBA carveout; the debug policy blob lives at offset SZ_1M within it */
	phys_addr_t mba_phys;
	size_t mba_size;
	size_t dp_size;		/* size of the "msadp" debug policy, 0 if absent */

	/* Optional dedicated metadata region; 0 means "DMA-allocate instead" */
	phys_addr_t mdata_phys;
	size_t mdata_size;

	/* MPSS firmware carveout */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	struct platform_device *bam_dmux;
	bool need_mem_protection;	/* route carveout ownership through SCM */
	bool has_alt_reset;		/* use RMB_MBA_ALT_RESET reset sequence */
	bool has_mba_logs;
	bool has_spare_reg;		/* AXI_GATING_VALID_OVERRIDE workaround */
	bool has_qaccept_regs;
	bool has_ext_cntl_regs;
	bool has_vq6;
	/* Current SCM VMID permission bitmaps for the mpss/mba regions */
	int mpss_perm;
	int mba_perm;
	const char *hexagon_mdt_image;
	int version;			/* MSS_* enum selecting the boot sequence */
};
243 
/*
 * SoC/IP revision identifiers. Stored in q6v5::version and used to pick the
 * version-specific power-up path in q6v5proc_reset().
 */
enum {
	MSS_MSM8909,
	MSS_MSM8916,
	MSS_MSM8953,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SC7280,
	MSS_SDM845,
};
255 
256 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
257 			       const struct qcom_mss_reg_res *reg_res)
258 {
259 	int rc;
260 	int i;
261 
262 	if (!reg_res)
263 		return 0;
264 
265 	for (i = 0; reg_res[i].supply; i++) {
266 		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
267 		if (IS_ERR(regs[i].reg)) {
268 			rc = PTR_ERR(regs[i].reg);
269 			if (rc != -EPROBE_DEFER)
270 				dev_err(dev, "Failed to get %s\n regulator",
271 					reg_res[i].supply);
272 			return rc;
273 		}
274 
275 		regs[i].uV = reg_res[i].uV;
276 		regs[i].uA = reg_res[i].uA;
277 	}
278 
279 	return i;
280 }
281 
282 static int q6v5_regulator_enable(struct q6v5 *qproc,
283 				 struct reg_info *regs, int count)
284 {
285 	int ret;
286 	int i;
287 
288 	for (i = 0; i < count; i++) {
289 		if (regs[i].uV > 0) {
290 			ret = regulator_set_voltage(regs[i].reg,
291 					regs[i].uV, INT_MAX);
292 			if (ret) {
293 				dev_err(qproc->dev,
294 					"Failed to request voltage for %d.\n",
295 						i);
296 				goto err;
297 			}
298 		}
299 
300 		if (regs[i].uA > 0) {
301 			ret = regulator_set_load(regs[i].reg,
302 						 regs[i].uA);
303 			if (ret < 0) {
304 				dev_err(qproc->dev,
305 					"Failed to set regulator mode\n");
306 				goto err;
307 			}
308 		}
309 
310 		ret = regulator_enable(regs[i].reg);
311 		if (ret) {
312 			dev_err(qproc->dev, "Regulator enable failed\n");
313 			goto err;
314 		}
315 	}
316 
317 	return 0;
318 err:
319 	for (; i >= 0; i--) {
320 		if (regs[i].uV > 0)
321 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
322 
323 		if (regs[i].uA > 0)
324 			regulator_set_load(regs[i].reg, 0);
325 
326 		regulator_disable(regs[i].reg);
327 	}
328 
329 	return ret;
330 }
331 
332 static void q6v5_regulator_disable(struct q6v5 *qproc,
333 				   struct reg_info *regs, int count)
334 {
335 	int i;
336 
337 	for (i = 0; i < count; i++) {
338 		if (regs[i].uV > 0)
339 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
340 
341 		if (regs[i].uA > 0)
342 			regulator_set_load(regs[i].reg, 0);
343 
344 		regulator_disable(regs[i].reg);
345 	}
346 }
347 
/*
 * Prepare and enable @count clocks; on failure disable the ones already
 * enabled and return the error.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int idx = 0;
	int ret;

	while (idx < count) {
		ret = clk_prepare_enable(clks[idx]);
		if (ret) {
			dev_err(dev, "Clock enable failed\n");
			while (--idx >= 0)
				clk_disable_unprepare(clks[idx]);
			return ret;
		}
		idx++;
	}

	return 0;
}
369 
/* Disable and unprepare @count clocks, in the same order they were enabled. */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	struct clk **end = clks + count;

	for (; clks < end; clks++)
		clk_disable_unprepare(*clks);
}
378 
379 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
380 			   size_t pd_count)
381 {
382 	int ret;
383 	int i;
384 
385 	for (i = 0; i < pd_count; i++) {
386 		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
387 		ret = pm_runtime_get_sync(pds[i]);
388 		if (ret < 0) {
389 			pm_runtime_put_noidle(pds[i]);
390 			dev_pm_genpd_set_performance_state(pds[i], 0);
391 			goto unroll_pd_votes;
392 		}
393 	}
394 
395 	return 0;
396 
397 unroll_pd_votes:
398 	for (i--; i >= 0; i--) {
399 		dev_pm_genpd_set_performance_state(pds[i], 0);
400 		pm_runtime_put(pds[i]);
401 	}
402 
403 	return ret;
404 }
405 
406 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
407 			     size_t pd_count)
408 {
409 	int i;
410 
411 	for (i = 0; i < pd_count; i++) {
412 		dev_pm_genpd_set_performance_state(pds[i], 0);
413 		pm_runtime_put(pds[i]);
414 	}
415 }
416 
417 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
418 				   bool local, bool remote, phys_addr_t addr,
419 				   size_t size)
420 {
421 	struct qcom_scm_vmperm next[2];
422 	int perms = 0;
423 
424 	if (!qproc->need_mem_protection)
425 		return 0;
426 
427 	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
428 	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
429 		return 0;
430 
431 	if (local) {
432 		next[perms].vmid = QCOM_SCM_VMID_HLOS;
433 		next[perms].perm = QCOM_SCM_PERM_RWX;
434 		perms++;
435 	}
436 
437 	if (remote) {
438 		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
439 		next[perms].perm = QCOM_SCM_PERM_RW;
440 		perms++;
441 	}
442 
443 	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
444 				   current_perm, next, perms);
445 }
446 
447 static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
448 {
449 	const struct firmware *dp_fw;
450 
451 	if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
452 		return;
453 
454 	if (SZ_1M + dp_fw->size <= qproc->mba_size) {
455 		memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
456 		qproc->dp_size = dp_fw->size;
457 	}
458 
459 	release_firmware(dp_fw);
460 }
461 
462 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
463 {
464 	struct q6v5 *qproc = rproc->priv;
465 	void *mba_region;
466 
467 	/* MBA is restricted to a maximum size of 1M */
468 	if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
469 		dev_err(qproc->dev, "MBA firmware load failed\n");
470 		return -EINVAL;
471 	}
472 
473 	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
474 	if (!mba_region) {
475 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
476 			&qproc->mba_phys, qproc->mba_size);
477 		return -EBUSY;
478 	}
479 
480 	memcpy(mba_region, fw->data, fw->size);
481 	q6v5_debug_policy_load(qproc, mba_region);
482 	memunmap(mba_region);
483 
484 	return 0;
485 }
486 
/*
 * Put the MSS into reset using whichever mechanism this SoC variant
 * provides. The ordering of the reset-control and register writes in each
 * branch is deliberate; do not reorder.
 */
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		/* Pulse the MSS restart while the PDC reset is held asserted */
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is being reset with the Q6 modem partly
		 * operational there is possibility of AXI valid signal to
		 * glitch, leading to spurious transactions and Q6 hangs. A work
		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
		 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
		 * is withdrawn post MSS assert followed by a MSS deassert,
		 * while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else if (qproc->has_ext_cntl_regs) {
		/* NOTE(review): clears the RSCC disable register before cycling
		 * both resets — confirm polarity against the SC7280 docs. */
		regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
		reset_control_assert(qproc->pdc_reset);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		/* Plain level-triggered MSS reset */
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}
525 
/*
 * Release the MSS from reset, mirroring the variant-specific assert paths
 * in q6v5_reset_assert().
 */
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		/* Pulse MSS restart with ALT_RESET raised, under PDC reset */
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
		/* These variants left MSS deasserted; cycle it instead */
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}
544 
545 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
546 {
547 	unsigned long timeout;
548 	s32 val;
549 
550 	timeout = jiffies + msecs_to_jiffies(ms);
551 	for (;;) {
552 		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
553 		if (val)
554 			break;
555 
556 		if (time_after(jiffies, timeout))
557 			return -ETIMEDOUT;
558 
559 		msleep(1);
560 	}
561 
562 	return val;
563 }
564 
565 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
566 {
567 
568 	unsigned long timeout;
569 	s32 val;
570 
571 	timeout = jiffies + msecs_to_jiffies(ms);
572 	for (;;) {
573 		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
574 		if (val < 0)
575 			break;
576 
577 		if (!status && val)
578 			break;
579 		else if (status && val == status)
580 			break;
581 
582 		if (time_after(jiffies, timeout))
583 			return -ETIMEDOUT;
584 
585 		msleep(1);
586 	}
587 
588 	return val;
589 }
590 
591 static void q6v5_dump_mba_logs(struct q6v5 *qproc)
592 {
593 	struct rproc *rproc = qproc->rproc;
594 	void *data;
595 	void *mba_region;
596 
597 	if (!qproc->has_mba_logs)
598 		return;
599 
600 	if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
601 				    qproc->mba_size))
602 		return;
603 
604 	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
605 	if (!mba_region)
606 		return;
607 
608 	data = vmalloc(MBA_LOG_SIZE);
609 	if (data) {
610 		memcpy(data, mba_region, MBA_LOG_SIZE);
611 		dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
612 	}
613 	memunmap(mba_region);
614 }
615 
/*
 * Bring the Hexagon core out of reset using the version-specific sequence:
 * SDM845 and SC7180/SC7280 drive a hardware boot FSM, the v56-era parts
 * (MSM8909/8953/8996/8998) power-sequence the memories by hand, and older
 * parts use the legacy QDSS head-switch path. On success the PBL has also
 * reported its boot status via the RMB.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the QDSP6SS sleep clock and wait for it to turn on */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* FSM completion is signalled via bit 0 of RMB_MBA_MSS_STATUS */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) {
		/* Enable the QDSP6SS sleep clock and wait for it to turn on */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Wait for 10 us for any staggering logic to settle */
		usleep_range(10, 20);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the MSS_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8909 ||
		   qproc->version == MSS_MSM8953 ||
		   qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {

		if (qproc->version != MSS_MSM8909 &&
		    qproc->version != MSS_MSM8953)
			/* Override the ACC value if required */
			writel(QDSP6SS_ACC_OVERRIDE_VAL,
			       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write posted before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		if (qproc->version != MSS_MSM8909) {
			int mem_pwr_ctl;

			/* Deassert QDSP6 compiler memory clamp */
			val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
			val &= ~QDSP6v56_CLAMP_QMC_MEM;
			writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

			/* Deassert memory peripheral sleep and L2 memory standby */
			val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
			writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

			/* Turn on L1, L2, ETB and JU memories 1 at a time */
			if (qproc->version == MSS_MSM8953 ||
			    qproc->version == MSS_MSM8996) {
				mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
				i = 19;
			} else {
				/* MSS_MSM8998 */
				mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
				i = 28;
			}
			val = readl(qproc->reg_base + mem_pwr_ctl);
			for (; i >= 0; i--) {
				val |= BIT(i);
				writel(val, qproc->reg_base + mem_pwr_ctl);
				/*
				 * Read back value to ensure the write is done then
				 * wait for 1us for both memory peripheral and data
				 * array to turn on.
				 */
				val |= readl(qproc->reg_base + mem_pwr_ctl);
				udelay(1);
			}
		} else {
			/* Turn on memories */
			val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
			val |= Q6SS_SLP_RET_N | Q6SS_L2DATA_STBY_N |
			       Q6SS_ETB_SLP_NRET_N | QDSP6V55_MEM_BITS;
			writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

			/* Turn on L2 banks 1 at a time */
			for (i = 0; i <= 7; i++) {
				val |= BIT(i);
				writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
			}
		}

		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Legacy (pre-v56) path using the QDSS head switch */
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
853 
/*
 * Perform the Q-channel enable handshake for the register group at @offset
 * within @map: raise QREQn and wait for QACCEPTn. On variants with external
 * control registers the AXIM1 clock is forced on first so the handshake can
 * make progress. No-op when the variant has no QACCEPT registers.
 *
 * Returns 0 on success or -ETIMEDOUT.
 */
static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
	unsigned int val;
	int ret;

	if (!qproc->has_qaccept_regs)
		return 0;

	if (qproc->has_ext_cntl_regs) {
		/* Undo any RSCC disable and force the AXIM1 clock on */
		regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
		regmap_write(qproc->conn_map, qproc->force_clk_on, 1);

		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "failed to enable axim1 clock\n");
			return -ETIMEDOUT;
		}
	}

	/* Request the Q-channel up */
	regmap_write(map, offset + QACCEPT_REQ_REG, 1);

	/* Wait for accept */
	ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
				       QACCEPT_TIMEOUT_US);
	if (ret) {
		dev_err(qproc->dev, "qchannel enable failed\n");
		return -ETIMEDOUT;
	}

	return 0;
}
886 
/*
 * Take down the Q-channel for the register group at @offset within @map.
 *
 * The takedown request may be denied by the device; in that case the
 * request is re-raised and the whole sequence retried (up to 10 outer
 * attempts, each polling accept/deny up to 10 times). If the handshake
 * never completes we only log — the subsequent mss_restart is relied upon
 * to clear any pending transactions.
 */
static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
{
	int ret;
	unsigned int val, retry;
	unsigned int nretry = 10;
	bool takedown_complete = false;

	if (!qproc->has_qaccept_regs)
		return;

	while (!takedown_complete && nretry) {
		nretry--;

		/* Wait for active transactions to complete */
		regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
					 QACCEPT_TIMEOUT_US);

		/* Request Q-channel transaction takedown */
		regmap_write(map, offset + QACCEPT_REQ_REG, 0);

		/*
		 * If the request is denied, reset the Q-channel takedown request,
		 * wait for active transactions to complete and retry takedown.
		 */
		retry = 10;
		while (retry) {
			usleep_range(5, 10);
			retry--;
			ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
			if (!ret && val) {
				/* Denied: re-raise the request and retry the outer loop */
				regmap_write(map, offset + QACCEPT_REQ_REG, 1);
				break;
			}

			ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
			if (!ret && !val) {
				/* Accept dropped: takedown handshake finished */
				takedown_complete = true;
				break;
			}
		}

		/* Inner poll exhausted without deny or accept-drop: give up */
		if (!retry)
			break;
	}

	/* Rely on mss_restart to clear out pending transactions on takedown failure */
	if (!takedown_complete)
		dev_err(qproc->dev, "qchannel takedown failed\n");
}
936 
/*
 * Halt the AXI port whose halt register group lives at @offset within
 * @halt_map: request a halt, wait for the acknowledge, verify the port
 * reports idle, then clear the request (the port stays halted until the
 * next reset). Failure is only logged — there is nothing more to do here.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
963 
/*
 * Hand the MPSS MDT metadata to the MBA for authentication.
 *
 * The metadata is staged either in the dedicated mdata carveout (when
 * qproc->mdata_phys is set) or in a freshly DMA-allocated contiguous
 * buffer, ownership is transferred to the modem via SCM, and the address
 * is published through the RMB registers. After the MBA reports the
 * authentication result the buffer is reclaimed and freed.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
				const char *fw_name)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	if (qproc->mdata_phys) {
		/* Dedicated metadata region: map it and bounds-check */
		if (size > qproc->mdata_size) {
			ret = -EINVAL;
			dev_err(qproc->dev, "metadata size outside memory range\n");
			goto free_metadata;
		}

		phys = qproc->mdata_phys;
		ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC);
		if (!ptr) {
			ret = -EBUSY;
			dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
				&qproc->mdata_phys, size);
			goto free_metadata;
		}
	} else {
		/* No carveout: stage via a contiguous DMA allocation */
		ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
		if (!ptr) {
			ret = -ENOMEM;
			dev_err(qproc->dev, "failed to allocate mdt buffer\n");
			goto free_metadata;
		}
	}

	memcpy(ptr, metadata, size);

	/* The carveout mapping is only needed for the copy above */
	if (qproc->mdata_phys)
		memunmap(ptr);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Publish the metadata address and kick the MBA */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	if (!qproc->mdata_phys)
		dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
free_metadata:
	kfree(metadata);

	/* A positive value here is the MBA status, which means success */
	return ret < 0 ? ret : 0;
}
1044 
1045 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
1046 {
1047 	if (phdr->p_type != PT_LOAD)
1048 		return false;
1049 
1050 	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
1051 		return false;
1052 
1053 	if (!phdr->p_memsz)
1054 		return false;
1055 
1056 	return true;
1057 }
1058 
/*
 * q6v5_mba_load() - power the Q6 up and boot the Modem Boot Authenticator
 * @qproc: modem subsystem context
 *
 * Enables power domains, regulators, clocks and deasserts resets in the
 * order the hardware requires, grants the Q6 access to the MBA (and MPSS)
 * regions, releases the core from reset and waits for the MBA to report
 * that the XPU has been unlocked.
 *
 * Return: 0 on success, negative errno on failure.  Everything enabled up
 * to the point of failure is released through the unwind labels below.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;
	bool mba_load_err = false;

	ret = qcom_q6v5_prepare(&qproc->q6v5);
	if (ret)
		return ret;

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
				    qproc->fallback_proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_fallback_proxy_reg;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	if (ret) {
		dev_err(qproc->dev, "failed to enable axi bridge\n");
		goto disable_active_clks;
	}

	/*
	 * Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide
	 * the Q6 access to this region.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/*
	 * Point the PBL at the MBA image; a non-zero dp_size indicates a
	 * debug-policy blob located 1M above the MBA base.
	 */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
	if (qproc->dp_size) {
		writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
		writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* Record the MBA region so its boot log can be located post-mortem */
	if (qproc->has_mba_logs)
		qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	if (qproc->has_vq6)
		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
	mba_load_err = true;
reclaim_mba:
	/*
	 * Take the MBA region back from the Q6.  Dump its log only when the
	 * failure happened after the core was actually started (mba_load_err).
	 */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	} else if (mba_load_err) {
		q6v5_dump_mba_logs(qproc);
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_fallback_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
			       qproc->fallback_proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
1224 
/*
 * q6v5_mba_reclaim() - shut the Q6 down and release MBA-boot resources
 * @qproc: modem subsystem context
 *
 * Mirror of q6v5_mba_load(): halts the AXI ports, asserts reset, disables
 * clocks/regulators and reclaims the MBA region from the Q6.  Proxy
 * resources are only released here when the handover interrupt never
 * arrived (qcom_q6v5_unprepare() returning non-zero); otherwise the
 * handover callback already dropped them.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;
	qproc->dp_size = 0;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	if (qproc->has_vq6)
		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	if (qproc->has_ext_cntl_regs) {
		regmap_write(qproc->conn_map, qproc->rscc_disable, 1);

		/* Poll the *_clk_off status fields until they read zero */
		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable axim1 clock\n");

		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable crypto clock\n");
	}

	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	/* Non-zero means the proxy handover never fired; clean up ourselves */
	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
				       qproc->fallback_proxy_reg_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
1295 
1296 static int q6v5_reload_mba(struct rproc *rproc)
1297 {
1298 	struct q6v5 *qproc = rproc->priv;
1299 	const struct firmware *fw;
1300 	int ret;
1301 
1302 	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
1303 	if (ret < 0)
1304 		return ret;
1305 
1306 	q6v5_load(rproc, fw);
1307 	ret = q6v5_mba_load(qproc);
1308 	release_firmware(fw);
1309 
1310 	return ret;
1311 }
1312 
1313 static int q6v5_mpss_load(struct q6v5 *qproc)
1314 {
1315 	const struct elf32_phdr *phdrs;
1316 	const struct elf32_phdr *phdr;
1317 	const struct firmware *seg_fw;
1318 	const struct firmware *fw;
1319 	struct elf32_hdr *ehdr;
1320 	phys_addr_t mpss_reloc;
1321 	phys_addr_t boot_addr;
1322 	phys_addr_t min_addr = PHYS_ADDR_MAX;
1323 	phys_addr_t max_addr = 0;
1324 	u32 code_length;
1325 	bool relocate = false;
1326 	char *fw_name;
1327 	size_t fw_name_len;
1328 	ssize_t offset;
1329 	size_t size = 0;
1330 	void *ptr;
1331 	int ret;
1332 	int i;
1333 
1334 	fw_name_len = strlen(qproc->hexagon_mdt_image);
1335 	if (fw_name_len <= 4)
1336 		return -EINVAL;
1337 
1338 	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
1339 	if (!fw_name)
1340 		return -ENOMEM;
1341 
1342 	ret = request_firmware(&fw, fw_name, qproc->dev);
1343 	if (ret < 0) {
1344 		dev_err(qproc->dev, "unable to load %s\n", fw_name);
1345 		goto out;
1346 	}
1347 
1348 	/* Initialize the RMB validator */
1349 	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1350 
1351 	ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
1352 	if (ret)
1353 		goto release_firmware;
1354 
1355 	ehdr = (struct elf32_hdr *)fw->data;
1356 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1357 
1358 	for (i = 0; i < ehdr->e_phnum; i++) {
1359 		phdr = &phdrs[i];
1360 
1361 		if (!q6v5_phdr_valid(phdr))
1362 			continue;
1363 
1364 		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
1365 			relocate = true;
1366 
1367 		if (phdr->p_paddr < min_addr)
1368 			min_addr = phdr->p_paddr;
1369 
1370 		if (phdr->p_paddr + phdr->p_memsz > max_addr)
1371 			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
1372 	}
1373 
1374 	if (qproc->version == MSS_MSM8953) {
1375 		ret = qcom_scm_pas_mem_setup(MPSS_PAS_ID, qproc->mpss_phys, qproc->mpss_size);
1376 		if (ret) {
1377 			dev_err(qproc->dev,
1378 				"setting up mpss memory failed: %d\n", ret);
1379 			goto release_firmware;
1380 		}
1381 	}
1382 
1383 	/*
1384 	 * In case of a modem subsystem restart on secure devices, the modem
1385 	 * memory can be reclaimed only after MBA is loaded.
1386 	 */
1387 	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
1388 				qproc->mpss_phys, qproc->mpss_size);
1389 
1390 	/* Share ownership between Linux and MSS, during segment loading */
1391 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
1392 				      qproc->mpss_phys, qproc->mpss_size);
1393 	if (ret) {
1394 		dev_err(qproc->dev,
1395 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1396 		ret = -EAGAIN;
1397 		goto release_firmware;
1398 	}
1399 
1400 	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
1401 	qproc->mpss_reloc = mpss_reloc;
1402 	/* Load firmware segments */
1403 	for (i = 0; i < ehdr->e_phnum; i++) {
1404 		phdr = &phdrs[i];
1405 
1406 		if (!q6v5_phdr_valid(phdr))
1407 			continue;
1408 
1409 		offset = phdr->p_paddr - mpss_reloc;
1410 		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
1411 			dev_err(qproc->dev, "segment outside memory range\n");
1412 			ret = -EINVAL;
1413 			goto release_firmware;
1414 		}
1415 
1416 		if (phdr->p_filesz > phdr->p_memsz) {
1417 			dev_err(qproc->dev,
1418 				"refusing to load segment %d with p_filesz > p_memsz\n",
1419 				i);
1420 			ret = -EINVAL;
1421 			goto release_firmware;
1422 		}
1423 
1424 		ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
1425 		if (!ptr) {
1426 			dev_err(qproc->dev,
1427 				"unable to map memory region: %pa+%zx-%x\n",
1428 				&qproc->mpss_phys, offset, phdr->p_memsz);
1429 			goto release_firmware;
1430 		}
1431 
1432 		if (phdr->p_filesz && phdr->p_offset < fw->size) {
1433 			/* Firmware is large enough to be non-split */
1434 			if (phdr->p_offset + phdr->p_filesz > fw->size) {
1435 				dev_err(qproc->dev,
1436 					"failed to load segment %d from truncated file %s\n",
1437 					i, fw_name);
1438 				ret = -EINVAL;
1439 				memunmap(ptr);
1440 				goto release_firmware;
1441 			}
1442 
1443 			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1444 		} else if (phdr->p_filesz) {
1445 			/* Replace "xxx.xxx" with "xxx.bxx" */
1446 			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1447 			ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
1448 							ptr, phdr->p_filesz);
1449 			if (ret) {
1450 				dev_err(qproc->dev, "failed to load %s\n", fw_name);
1451 				memunmap(ptr);
1452 				goto release_firmware;
1453 			}
1454 
1455 			if (seg_fw->size != phdr->p_filesz) {
1456 				dev_err(qproc->dev,
1457 					"failed to load segment %d from truncated file %s\n",
1458 					i, fw_name);
1459 				ret = -EINVAL;
1460 				release_firmware(seg_fw);
1461 				memunmap(ptr);
1462 				goto release_firmware;
1463 			}
1464 
1465 			release_firmware(seg_fw);
1466 		}
1467 
1468 		if (phdr->p_memsz > phdr->p_filesz) {
1469 			memset(ptr + phdr->p_filesz, 0,
1470 			       phdr->p_memsz - phdr->p_filesz);
1471 		}
1472 		memunmap(ptr);
1473 		size += phdr->p_memsz;
1474 
1475 		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1476 		if (!code_length) {
1477 			boot_addr = relocate ? qproc->mpss_phys : min_addr;
1478 			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1479 			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1480 		}
1481 		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1482 
1483 		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
1484 		if (ret < 0) {
1485 			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
1486 				ret);
1487 			goto release_firmware;
1488 		}
1489 	}
1490 
1491 	/* Transfer ownership of modem ddr region to q6 */
1492 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
1493 				      qproc->mpss_phys, qproc->mpss_size);
1494 	if (ret) {
1495 		dev_err(qproc->dev,
1496 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1497 		ret = -EAGAIN;
1498 		goto release_firmware;
1499 	}
1500 
1501 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1502 	if (ret == -ETIMEDOUT)
1503 		dev_err(qproc->dev, "MPSS authentication timed out\n");
1504 	else if (ret < 0)
1505 		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1506 
1507 	qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
1508 
1509 release_firmware:
1510 	release_firmware(fw);
1511 out:
1512 	kfree(fw_name);
1513 
1514 	return ret < 0 ? ret : 0;
1515 }
1516 
/*
 * qcom_q6v5_dump_segment() - coredump copy callback for one MPSS segment
 * @rproc: remote processor instance
 * @segment: segment descriptor registered in qcom_q6v5_register_dump_segments()
 * @dest: destination buffer for the dump data
 * @cp_offset: offset within the segment to start copying from
 * @size: number of bytes to copy
 *
 * If the MBA is not resident it is reloaded first and the MPSS region
 * pulled back to Linux so the memory can be read.  Unreadable regions are
 * filled with 0xff.  Once the final segment has been copied, ownership is
 * returned to the Q6 and the MBA is torn down again.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest, size_t cp_offset, size_t size)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);

	/* Pad with 0xff when the region could not be mapped or reclaimed */
	if (ptr) {
		memcpy(dest, ptr, size);
		memunmap(ptr);
	} else {
		memset(dest, 0xff, size);
	}

	qproc->current_dump_size += size;

	/* Reclaim mba after copying segments */
	if (qproc->current_dump_size == qproc->total_dump_size) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}
1562 
/*
 * q6v5_start() - rproc .start handler: boot MBA, then load and start MPSS
 * @rproc: remote processor instance
 *
 * Return: 0 on success, negative errno on failure.  On failure after the
 * MBA booted, the MBA is reclaimed and its log dumped for diagnostics.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
		 qproc->dp_size ? "" : "out");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	/* Wait for the modem to signal it is up (start/handover interrupt) */
	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* The MBA is no longer needed once the modem runs; take it back */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->current_dump_size = 0;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);
	q6v5_dump_mba_logs(qproc);

	return ret;
}
1604 
1605 static int q6v5_stop(struct rproc *rproc)
1606 {
1607 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1608 	int ret;
1609 
1610 	ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
1611 	if (ret == -ETIMEDOUT)
1612 		dev_err(qproc->dev, "timed out on wait\n");
1613 
1614 	q6v5_mba_reclaim(qproc);
1615 
1616 	return 0;
1617 }
1618 
1619 static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1620 					    const struct firmware *mba_fw)
1621 {
1622 	const struct firmware *fw;
1623 	const struct elf32_phdr *phdrs;
1624 	const struct elf32_phdr *phdr;
1625 	const struct elf32_hdr *ehdr;
1626 	struct q6v5 *qproc = rproc->priv;
1627 	unsigned long i;
1628 	int ret;
1629 
1630 	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
1631 	if (ret < 0) {
1632 		dev_err(qproc->dev, "unable to load %s\n",
1633 			qproc->hexagon_mdt_image);
1634 		return ret;
1635 	}
1636 
1637 	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1638 
1639 	ehdr = (struct elf32_hdr *)fw->data;
1640 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1641 	qproc->total_dump_size = 0;
1642 
1643 	for (i = 0; i < ehdr->e_phnum; i++) {
1644 		phdr = &phdrs[i];
1645 
1646 		if (!q6v5_phdr_valid(phdr))
1647 			continue;
1648 
1649 		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1650 							phdr->p_memsz,
1651 							qcom_q6v5_dump_segment,
1652 							NULL);
1653 		if (ret)
1654 			break;
1655 
1656 		qproc->total_dump_size += phdr->p_memsz;
1657 	}
1658 
1659 	release_firmware(fw);
1660 	return ret;
1661 }
1662 
1663 static unsigned long q6v5_panic(struct rproc *rproc)
1664 {
1665 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1666 
1667 	return qcom_q6v5_panic(&qproc->q6v5);
1668 }
1669 
/* remoteproc callbacks for the self-authenticating modem subsystem */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
	.panic = q6v5_panic,
};
1677 
/*
 * qcom_msa_handover() - release proxy resources once the modem takes over
 * @q6v5: embedded q6v5 core context
 *
 * Invoked when the modem signals the MSA handover: the proxy clocks,
 * regulators and power domains held during boot are no longer needed and
 * are dropped here.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
			       qproc->fallback_proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
1690 
/*
 * q6v5_init_mem() - map MMIO regions and parse syscon register references
 * @qproc: modem subsystem context
 * @pdev: platform device backing this instance
 *
 * Maps the "qdsp6" and "rmb" register blocks and resolves the various
 * qcom,*-regs syscon phandles (halt, qaccept, ext-cntl, spare) declared in
 * the device tree, storing the register offsets in @qproc.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	int halt_cell_cnt = 3;
	int ret;

	qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* Platforms with a vQ6 carry one extra halt register cell */
	if (qproc->has_vq6)
		halt_cell_cnt++;

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", halt_cell_cnt, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_vq6)
		qproc->halt_vq6 = args.args[3];

	if (qproc->has_qaccept_regs) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,qaccept-regs",
						       3, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
			return -EINVAL;
		}

		qproc->qaccept_mdm = args.args[0];
		qproc->qaccept_cx = args.args[1];
		qproc->qaccept_axi = args.args[2];
	}

	if (qproc->has_ext_cntl_regs) {
		/* Index 0: force_clk_on / rscc_disable in the conn syscon */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,ext-regs",
						       2, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->force_clk_on = args.args[0];
		qproc->rscc_disable = args.args[1];

		/* Index 1: clock-off status offsets in the halt syscon */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,ext-regs",
						       2, 1, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
			return -EINVAL;
		}

		qproc->axim1_clk_off = args.args[0];
		qproc->crypto_clk_off = args.args[1];
	}

	if (qproc->has_spare_reg) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,spare-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse spare-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}
1789 
1790 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1791 		char **clk_names)
1792 {
1793 	int i;
1794 
1795 	if (!clk_names)
1796 		return 0;
1797 
1798 	for (i = 0; clk_names[i]; i++) {
1799 		clks[i] = devm_clk_get(dev, clk_names[i]);
1800 		if (IS_ERR(clks[i])) {
1801 			int rc = PTR_ERR(clks[i]);
1802 
1803 			if (rc != -EPROBE_DEFER)
1804 				dev_err(dev, "Failed to get %s clock\n",
1805 					clk_names[i]);
1806 			return rc;
1807 		}
1808 	}
1809 
1810 	return i;
1811 }
1812 
1813 static int q6v5_pds_attach(struct device *dev, struct device **devs,
1814 			   char **pd_names)
1815 {
1816 	size_t num_pds = 0;
1817 	int ret;
1818 	int i;
1819 
1820 	if (!pd_names)
1821 		return 0;
1822 
1823 	while (pd_names[num_pds])
1824 		num_pds++;
1825 
1826 	for (i = 0; i < num_pds; i++) {
1827 		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1828 		if (IS_ERR_OR_NULL(devs[i])) {
1829 			ret = PTR_ERR(devs[i]) ? : -ENODATA;
1830 			goto unroll_attach;
1831 		}
1832 	}
1833 
1834 	return num_pds;
1835 
1836 unroll_attach:
1837 	for (i--; i >= 0; i--)
1838 		dev_pm_domain_detach(devs[i], false);
1839 
1840 	return ret;
1841 }
1842 
1843 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1844 			    size_t pd_count)
1845 {
1846 	int i;
1847 
1848 	for (i = 0; i < pd_count; i++)
1849 		dev_pm_domain_detach(pds[i], false);
1850 }
1851 
1852 static int q6v5_init_reset(struct q6v5 *qproc)
1853 {
1854 	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1855 							      "mss_restart");
1856 	if (IS_ERR(qproc->mss_restart)) {
1857 		dev_err(qproc->dev, "failed to acquire mss restart\n");
1858 		return PTR_ERR(qproc->mss_restart);
1859 	}
1860 
1861 	if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
1862 		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1863 								    "pdc_reset");
1864 		if (IS_ERR(qproc->pdc_reset)) {
1865 			dev_err(qproc->dev, "failed to acquire pdc reset\n");
1866 			return PTR_ERR(qproc->pdc_reset);
1867 		}
1868 	}
1869 
1870 	return 0;
1871 }
1872 
/*
 * q6v5_alloc_memory_region() - resolve the MBA, MPSS and metadata carveouts
 * @qproc: modem subsystem context
 *
 * Supports two DT layouts: legacy "mba"/"mpss"/"metadata" sub-nodes each
 * with their own memory-region, or a flat memory-region list on the device
 * node (index 0 = mba, 1 = mpss, 2 = metadata).  The metadata region is
 * optional.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct reserved_mem *rmem;
	struct device_node *node;
	struct resource r;
	int ret;

	/*
	 * In the absence of mba/mpss sub-child, extract the mba and mpss
	 * reserved memory regions from device's memory-region property.
	 */
	child = of_get_child_by_name(qproc->dev->of_node, "mba");
	if (!child) {
		node = of_parse_phandle(qproc->dev->of_node,
					"memory-region", 0);
	} else {
		node = of_parse_phandle(child, "memory-region", 0);
		of_node_put(child);
	}

	/*
	 * NOTE(review): node may be NULL here if the phandle is absent;
	 * this relies on of_address_to_resource() failing gracefully in
	 * that case — confirm before depending on it.
	 */
	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mba region\n");
		return ret;
	}

	qproc->mba_phys = r.start;
	qproc->mba_size = resource_size(&r);

	if (!child) {
		node = of_parse_phandle(qproc->dev->of_node,
					"memory-region", 1);
	} else {
		child = of_get_child_by_name(qproc->dev->of_node, "mpss");
		node = of_parse_phandle(child, "memory-region", 0);
		of_node_put(child);
	}

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mpss region\n");
		return ret;
	}

	qproc->mpss_phys = qproc->mpss_reloc = r.start;
	qproc->mpss_size = resource_size(&r);

	if (!child) {
		node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
	} else {
		child = of_get_child_by_name(qproc->dev->of_node, "metadata");
		node = of_parse_phandle(child, "memory-region", 0);
		of_node_put(child);
	}

	/* The metadata carveout is optional; fall back to DMA allocation */
	if (!node)
		return 0;

	rmem = of_reserved_mem_lookup(node);
	if (!rmem) {
		dev_err(qproc->dev, "unable to resolve metadata region\n");
		return -EINVAL;
	}

	qproc->mdata_phys = rmem->base;
	qproc->mdata_size = rmem->size;

	return 0;
}
1945 
/*
 * q6v5_probe() - platform probe: gather all resources and register the rproc
 * @pdev: platform device to probe
 *
 * Resolves firmware names, MMIO/syscon regions, memory carveouts, clocks,
 * regulators, power domains and resets for the matched SoC, initializes
 * the shared q6v5 core and subdevices, and finally registers the rproc.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct device_node *node;
	struct q6v5 *qproc;
	struct rproc *rproc;
	const char *mba_image;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	/* SCM is required for memory-protection calls; defer until ready */
	if (desc->need_mem_protection && !qcom_scm_is_available())
		return -EPROBE_DEFER;

	/* firmware-name[0] may override the per-SoC default MBA image */
	mba_image = desc->hexagon_mba_image;
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    0, &mba_image);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(&pdev->dev, "unable to read mba firmware-name\n");
		return ret;
	}

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->auto_boot = false;
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	/* firmware-name[1] may override the default modem MDT image */
	qproc->hexagon_mdt_image = "modem.mdt";
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    1, &qproc->hexagon_mdt_image);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
		goto free_rproc;
	}

	platform_set_drvdata(pdev, qproc);

	qproc->has_qaccept_regs = desc->has_qaccept_regs;
	qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
	qproc->has_vq6 = desc->has_vq6;
	qproc->has_spare_reg = desc->has_spare_reg;
	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
			       desc->reset_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
		goto free_rproc;
	}
	qproc->reset_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev,  qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
			      desc->proxy_pd_names);
	/* Fallback to regulators for old device trees */
	if (ret == -ENODATA && desc->fallback_proxy_supply) {
		ret = q6v5_regulator_init(&pdev->dev,
					  qproc->fallback_proxy_regs,
					  desc->fallback_proxy_supply);
		if (ret < 0) {
			dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
			goto free_rproc;
		}
		qproc->fallback_proxy_reg_count = ret;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "Failed to init power domains\n");
		goto free_rproc;
	} else {
		qproc->proxy_pd_count = ret;
	}

	qproc->has_alt_reset = desc->has_alt_reset;
	ret = q6v5_init_reset(qproc);
	if (ret)
		goto detach_proxy_pds;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;
	qproc->has_mba_logs = desc->has_mba_logs;

	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
			     qcom_msa_handover);
	if (ret)
		goto detach_proxy_pds;

	/* Both regions start owned by Linux (HLOS) */
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
	if (IS_ERR(qproc->sysmon)) {
		ret = PTR_ERR(qproc->sysmon);
		goto remove_subdevs;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_sysmon_subdev;

	/* Optional BAM-DMUX child device for data-path multiplexing */
	node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
	qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
	of_node_put(node);

	return 0;

remove_sysmon_subdev:
	qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
free_rproc:
	rproc_free(rproc);

	return ret;
}
2112 
/*
 * q6v5_remove() - platform remove: unwind q6v5_probe() in reverse order
 * @pdev: platform device being removed
 */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	/* Destroy the optional BAM-DMUX child before taking the rproc down */
	if (qproc->bam_dmux)
		of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
	rproc_del(rproc);

	qcom_q6v5_deinit(&qproc->q6v5);
	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);

	rproc_free(rproc);

	return 0;
}
2134 
2135 static const struct rproc_hexagon_res sc7180_mss = {
2136 	.hexagon_mba_image = "mba.mbn",
2137 	.proxy_clk_names = (char*[]){
2138 		"xo",
2139 		NULL
2140 	},
2141 	.reset_clk_names = (char*[]){
2142 		"iface",
2143 		"bus",
2144 		"snoc_axi",
2145 		NULL
2146 	},
2147 	.active_clk_names = (char*[]){
2148 		"mnoc_axi",
2149 		"nav",
2150 		NULL
2151 	},
2152 	.proxy_pd_names = (char*[]){
2153 		"cx",
2154 		"mx",
2155 		"mss",
2156 		NULL
2157 	},
2158 	.need_mem_protection = true,
2159 	.has_alt_reset = false,
2160 	.has_mba_logs = true,
2161 	.has_spare_reg = true,
2162 	.has_qaccept_regs = false,
2163 	.has_ext_cntl_regs = false,
2164 	.has_vq6 = false,
2165 	.version = MSS_SC7180,
2166 };
2167 
2168 static const struct rproc_hexagon_res sc7280_mss = {
2169 	.hexagon_mba_image = "mba.mbn",
2170 	.proxy_clk_names = (char*[]){
2171 		"xo",
2172 		"pka",
2173 		NULL
2174 	},
2175 	.active_clk_names = (char*[]){
2176 		"iface",
2177 		"offline",
2178 		"snoc_axi",
2179 		NULL
2180 	},
2181 	.proxy_pd_names = (char*[]){
2182 		"cx",
2183 		"mss",
2184 		NULL
2185 	},
2186 	.need_mem_protection = true,
2187 	.has_alt_reset = false,
2188 	.has_mba_logs = true,
2189 	.has_spare_reg = false,
2190 	.has_qaccept_regs = true,
2191 	.has_ext_cntl_regs = true,
2192 	.has_vq6 = true,
2193 	.version = MSS_SC7280,
2194 };
2195 
2196 static const struct rproc_hexagon_res sdm845_mss = {
2197 	.hexagon_mba_image = "mba.mbn",
2198 	.proxy_clk_names = (char*[]){
2199 			"xo",
2200 			"prng",
2201 			NULL
2202 	},
2203 	.reset_clk_names = (char*[]){
2204 			"iface",
2205 			"snoc_axi",
2206 			NULL
2207 	},
2208 	.active_clk_names = (char*[]){
2209 			"bus",
2210 			"mem",
2211 			"gpll0_mss",
2212 			"mnoc_axi",
2213 			NULL
2214 	},
2215 	.proxy_pd_names = (char*[]){
2216 			"cx",
2217 			"mx",
2218 			"mss",
2219 			NULL
2220 	},
2221 	.need_mem_protection = true,
2222 	.has_alt_reset = true,
2223 	.has_mba_logs = false,
2224 	.has_spare_reg = false,
2225 	.has_qaccept_regs = false,
2226 	.has_ext_cntl_regs = false,
2227 	.has_vq6 = false,
2228 	.version = MSS_SDM845,
2229 };
2230 
2231 static const struct rproc_hexagon_res msm8998_mss = {
2232 	.hexagon_mba_image = "mba.mbn",
2233 	.proxy_clk_names = (char*[]){
2234 			"xo",
2235 			"qdss",
2236 			"mem",
2237 			NULL
2238 	},
2239 	.active_clk_names = (char*[]){
2240 			"iface",
2241 			"bus",
2242 			"gpll0_mss",
2243 			"mnoc_axi",
2244 			"snoc_axi",
2245 			NULL
2246 	},
2247 	.proxy_pd_names = (char*[]){
2248 			"cx",
2249 			"mx",
2250 			NULL
2251 	},
2252 	.need_mem_protection = true,
2253 	.has_alt_reset = false,
2254 	.has_mba_logs = false,
2255 	.has_spare_reg = false,
2256 	.has_qaccept_regs = false,
2257 	.has_ext_cntl_regs = false,
2258 	.has_vq6 = false,
2259 	.version = MSS_MSM8998,
2260 };
2261 
2262 static const struct rproc_hexagon_res msm8996_mss = {
2263 	.hexagon_mba_image = "mba.mbn",
2264 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2265 		{
2266 			.supply = "pll",
2267 			.uA = 100000,
2268 		},
2269 		{}
2270 	},
2271 	.proxy_clk_names = (char*[]){
2272 			"xo",
2273 			"pnoc",
2274 			"qdss",
2275 			NULL
2276 	},
2277 	.active_clk_names = (char*[]){
2278 			"iface",
2279 			"bus",
2280 			"mem",
2281 			"gpll0_mss",
2282 			"snoc_axi",
2283 			"mnoc_axi",
2284 			NULL
2285 	},
2286 	.proxy_pd_names = (char*[]){
2287 			"mx",
2288 			"cx",
2289 			NULL
2290 	},
2291 	.need_mem_protection = true,
2292 	.has_alt_reset = false,
2293 	.has_mba_logs = false,
2294 	.has_spare_reg = false,
2295 	.has_qaccept_regs = false,
2296 	.has_ext_cntl_regs = false,
2297 	.has_vq6 = false,
2298 	.version = MSS_MSM8996,
2299 };
2300 
2301 static const struct rproc_hexagon_res msm8909_mss = {
2302 	.hexagon_mba_image = "mba.mbn",
2303 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2304 		{
2305 			.supply = "pll",
2306 			.uA = 100000,
2307 		},
2308 		{}
2309 	},
2310 	.proxy_clk_names = (char*[]){
2311 		"xo",
2312 		NULL
2313 	},
2314 	.active_clk_names = (char*[]){
2315 		"iface",
2316 		"bus",
2317 		"mem",
2318 		NULL
2319 	},
2320 	.proxy_pd_names = (char*[]){
2321 		"mx",
2322 		"cx",
2323 		NULL
2324 	},
2325 	.need_mem_protection = false,
2326 	.has_alt_reset = false,
2327 	.has_mba_logs = false,
2328 	.has_spare_reg = false,
2329 	.has_qaccept_regs = false,
2330 	.has_ext_cntl_regs = false,
2331 	.has_vq6 = false,
2332 	.version = MSS_MSM8909,
2333 };
2334 
2335 static const struct rproc_hexagon_res msm8916_mss = {
2336 	.hexagon_mba_image = "mba.mbn",
2337 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2338 		{
2339 			.supply = "pll",
2340 			.uA = 100000,
2341 		},
2342 		{}
2343 	},
2344 	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
2345 		{
2346 			.supply = "mx",
2347 			.uV = 1050000,
2348 		},
2349 		{
2350 			.supply = "cx",
2351 			.uA = 100000,
2352 		},
2353 		{}
2354 	},
2355 	.proxy_clk_names = (char*[]){
2356 		"xo",
2357 		NULL
2358 	},
2359 	.active_clk_names = (char*[]){
2360 		"iface",
2361 		"bus",
2362 		"mem",
2363 		NULL
2364 	},
2365 	.proxy_pd_names = (char*[]){
2366 		"mx",
2367 		"cx",
2368 		NULL
2369 	},
2370 	.need_mem_protection = false,
2371 	.has_alt_reset = false,
2372 	.has_mba_logs = false,
2373 	.has_spare_reg = false,
2374 	.has_qaccept_regs = false,
2375 	.has_ext_cntl_regs = false,
2376 	.has_vq6 = false,
2377 	.version = MSS_MSM8916,
2378 };
2379 
2380 static const struct rproc_hexagon_res msm8953_mss = {
2381 	.hexagon_mba_image = "mba.mbn",
2382 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2383 		{
2384 			.supply = "pll",
2385 			.uA = 100000,
2386 		},
2387 		{}
2388 	},
2389 	.proxy_clk_names = (char*[]){
2390 		"xo",
2391 		NULL
2392 	},
2393 	.active_clk_names = (char*[]){
2394 		"iface",
2395 		"bus",
2396 		"mem",
2397 		NULL
2398 	},
2399 	.proxy_pd_names = (char*[]) {
2400 		"cx",
2401 		"mx",
2402 		"mss",
2403 		NULL
2404 	},
2405 	.need_mem_protection = false,
2406 	.has_alt_reset = false,
2407 	.has_mba_logs = false,
2408 	.has_spare_reg = false,
2409 	.has_qaccept_regs = false,
2410 	.has_ext_cntl_regs = false,
2411 	.has_vq6 = false,
2412 	.version = MSS_MSM8953,
2413 };
2414 
2415 static const struct rproc_hexagon_res msm8974_mss = {
2416 	.hexagon_mba_image = "mba.b00",
2417 	.proxy_supply = (struct qcom_mss_reg_res[]) {
2418 		{
2419 			.supply = "pll",
2420 			.uA = 100000,
2421 		},
2422 		{}
2423 	},
2424 	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
2425 		{
2426 			.supply = "mx",
2427 			.uV = 1050000,
2428 		},
2429 		{
2430 			.supply = "cx",
2431 			.uA = 100000,
2432 		},
2433 		{}
2434 	},
2435 	.active_supply = (struct qcom_mss_reg_res[]) {
2436 		{
2437 			.supply = "mss",
2438 			.uV = 1050000,
2439 			.uA = 100000,
2440 		},
2441 		{}
2442 	},
2443 	.proxy_clk_names = (char*[]){
2444 		"xo",
2445 		NULL
2446 	},
2447 	.active_clk_names = (char*[]){
2448 		"iface",
2449 		"bus",
2450 		"mem",
2451 		NULL
2452 	},
2453 	.proxy_pd_names = (char*[]){
2454 		"mx",
2455 		"cx",
2456 		NULL
2457 	},
2458 	.need_mem_protection = false,
2459 	.has_alt_reset = false,
2460 	.has_mba_logs = false,
2461 	.has_spare_reg = false,
2462 	.has_qaccept_regs = false,
2463 	.has_ext_cntl_regs = false,
2464 	.has_vq6 = false,
2465 	.version = MSS_MSM8974,
2466 };
2467 
2468 static const struct of_device_id q6v5_of_match[] = {
2469 	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
2470 	{ .compatible = "qcom,msm8909-mss-pil", .data = &msm8909_mss},
2471 	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
2472 	{ .compatible = "qcom,msm8953-mss-pil", .data = &msm8953_mss},
2473 	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
2474 	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
2475 	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
2476 	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
2477 	{ .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
2478 	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
2479 	{ },
2480 };
2481 MODULE_DEVICE_TABLE(of, q6v5_of_match);
2482 
/* Platform driver registration; matched against q6v5_of_match above */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");
2495