// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu.h>
#include <cpu_func.h>
#include <dm.h>
#include <init.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass.h>
#include <errno.h>
#include <spl.h>
#include <thermal.h>
#include <asm/arch/sci/sci.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch-imx/cpu.h>
#include <asm/armv8/cpu.h>
#include <asm/armv8/mmu.h>
#include <asm/setup.h>
#include <asm/mach-imx/boot_mode.h>

DECLARE_GLOBAL_DATA_PTR;

#define BT_PASSOVER_TAG	0x504F
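/*
 * Return the boot pass-over info block left at PASS_OVER_INFO_ADDR, or
 * NULL when the barker tag or structure length does not match.
 */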
struct pass_over_info_t *get_pass_over_info(void)
{
	struct pass_over_info_t *p =
		(struct pass_over_info_t *)PASS_OVER_INFO_ADDR;

	if (p->barker != BT_PASSOVER_TAG ||
	    p->len != sizeof(struct pass_over_info_t))
		return NULL;

	return p;
}

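/*
 * Early CPU init: restore the SPL data section when configured, and on
 * revision A silicon report boot success to the SCU if U-Boot was booted
 * from the first container.
 */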
int arch_cpu_init(void)
{
#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_RECOVER_DATA_SECTION)
	spl_save_restore_data();
#endif

#ifdef CONFIG_SPL_BUILD
	struct pass_over_info_t *pass_over;

	if (is_soc_rev(CHIP_REV_A)) {
		pass_over = get_pass_over_info();
		if (pass_over && pass_over->g_ap_mu == 0) {
			/*
			 * An ap_mu value of 0 means U-Boot was booted
			 * from the first container.
			 */
			sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
		}
	}
#endif

	return 0;
}

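/*
 * Probe the i.MX8 MU device used for SCU communication and power up the
 * SMMU on i.MX8QM.
 */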
int arch_cpu_init_dm(void)
{
	struct udevice *devp;
	int node, ret;

	node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");

	ret = uclass_get_device_by_of_offset(UCLASS_MISC, node, &devp);
	if (ret) {
		printf("could not get scu %d\n", ret);
		return ret;
	}

	if (is_imx8qm()) {
		ret = sc_pm_set_resource_power_mode(-1, SC_R_SMMU,
						    SC_PM_PW_MODE_ON);
		if (ret)
			return ret;
	}

	return 0;
}

int print_bootinfo(void)
{
	enum boot_device bt_dev = get_boot_device();

	puts("Boot:  ");
	switch (bt_dev) {
	case SD1_BOOT:
		puts("SD0\n");
		break;
	case SD2_BOOT:
		puts("SD1\n");
		break;
	case SD3_BOOT:
		puts("SD2\n");
		break;
	case MMC1_BOOT:
		puts("MMC0\n");
		break;
	case MMC2_BOOT:
		puts("MMC1\n");
		break;
	case MMC3_BOOT:
		puts("MMC2\n");
		break;
	case FLEXSPI_BOOT:
		puts("FLEXSPI\n");
		break;
	case SATA_BOOT:
		puts("SATA\n");
		break;
	case NAND_BOOT:
		puts("NAND\n");
		break;
	case USB_BOOT:
		puts("USB\n");
		break;
	default:
		printf("Unknown device %u\n", bt_dev);
		break;
	}

	return 0;
}

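/*
 * Ask the SCU which resource we booted from and translate it into the
 * corresponding boot_device value.
 */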
enum boot_device get_boot_device(void)
{
	enum boot_device boot_dev = SD1_BOOT;

	sc_rsrc_t dev_rsrc;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		boot_dev = MMC1_BOOT;
		break;
	case SC_R_SDHC_1:
		boot_dev = SD2_BOOT;
		break;
	case SC_R_SDHC_2:
		boot_dev = SD3_BOOT;
		break;
	case SC_R_NAND:
		boot_dev = NAND_BOOT;
		break;
	case SC_R_FSPI_0:
		boot_dev = FLEXSPI_BOOT;
		break;
	case SC_R_SATA_0:
		boot_dev = SATA_BOOT;
		break;
	case SC_R_USB_0:
	case SC_R_USB_1:
	case SC_R_USB_2:
		boot_dev = USB_BOOT;
		break;
	default:
		break;
	}

	return boot_dev;
}

#ifdef CONFIG_SERIAL_TAG
#define FUSE_UNIQUE_ID_WORD0 16
#define FUSE_UNIQUE_ID_WORD1 17
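/* Fill the serial number tag from the two unique ID fuse words */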
void get_board_serial(struct tag_serialnr *serialnr)
{
	sc_err_t err;
	u32 val1 = 0, val2 = 0;
	u32 word1, word2;

	if (!serialnr)
		return;

	word1 = FUSE_UNIQUE_ID_WORD0;
	word2 = FUSE_UNIQUE_ID_WORD1;

	err = sc_misc_otp_fuse_read(-1, word1, &val1);
	if (err != SC_ERR_NONE) {
		printf("%s fuse %d read error: %d\n", __func__, word1, err);
		return;
	}

	err = sc_misc_otp_fuse_read(-1, word2, &val2);
	if (err != SC_ERR_NONE) {
		printf("%s fuse %d read error: %d\n", __func__, word2, err);
		return;
	}
	serialnr->low = val1;
	serialnr->high = val2;
}
#endif /* CONFIG_SERIAL_TAG */

#ifdef CONFIG_ENV_IS_IN_MMC
__weak int board_mmc_get_env_dev(int devno)
{
	return CONFIG_SYS_MMC_ENV_DEV;
}

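/*
 * Derive the environment MMC device number from the SCU boot resource;
 * fall back to CONFIG_SYS_MMC_ENV_DEV when not booting from SD/MMC.
 */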
int mmc_get_env_dev(void)
{
	sc_rsrc_t dev_rsrc;
	int devno;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		devno = 0;
		break;
	case SC_R_SDHC_1:
		devno = 1;
		break;
	case SC_R_SDHC_2:
		devno = 2;
		break;
	default:
		/* Not booting from SD/MMC, use the default value */
		return CONFIG_SYS_MMC_ENV_DEV;
	}

	return board_mmc_get_env_dev(devno);
}
#endif

#define MEMSTART_ALIGNMENT  SZ_2M /* Align the memory start to 2MB */

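/*
 * Return the start and end address of memory region @mr if it is owned by
 * the current partition, or -EINVAL otherwise.
 */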
static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
			    sc_faddr_t *addr_end)
{
	sc_faddr_t start, end;
	int ret;
	bool owned;

	owned = sc_rm_is_memreg_owned(-1, mr);
	if (owned) {
		ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
		if (ret) {
			printf("Memreg get info failed, %d\n", ret);
			return -EINVAL;
		}
		debug("0x%llx -- 0x%llx\n", start, end);
		*addr_start = start;
		*addr_end = end;

		return 0;
	}

	return -EINVAL;
}

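/*
 * Default DRAM layout; boards with a different memory map can override
 * this weak function.
 */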
__weak void board_mem_get_layout(u64 *phys_sdram_1_start,
				 u64 *phys_sdram_1_size,
				 u64 *phys_sdram_2_start,
				 u64 *phys_sdram_2_size)
{
	*phys_sdram_1_start = PHYS_SDRAM_1;
	*phys_sdram_1_size = PHYS_SDRAM_1_SIZE;
	*phys_sdram_2_start = PHYS_SDRAM_2;
	*phys_sdram_2_size = PHYS_SDRAM_2_SIZE;
}

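/*
 * Report the usable size of the first DRAM bank: the owned memory region
 * containing U-Boot (CONFIG_SYS_TEXT_BASE), clamped to the end of bank 1.
 */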
phys_size_t get_effective_memsize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, start_aligned;
	u64 phys_sdram_1_start, phys_sdram_1_size;
	u64 phys_sdram_2_start, phys_sdram_2_size;
	int err;

	board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
			     &phys_sdram_2_start, &phys_sdram_2_size);

	end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start_aligned = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small, skip it */
			if (start_aligned > end)
				continue;

			/* Find the memory region that U-Boot runs in */
			if (start >= phys_sdram_1_start && start <= end1 &&
			    (start <= CONFIG_SYS_TEXT_BASE &&
			    end >= CONFIG_SYS_TEXT_BASE)) {
				if ((end + 1) <=
				    ((sc_faddr_t)phys_sdram_1_start +
				    phys_sdram_1_size))
					return (end - phys_sdram_1_start + 1);
				else
					return phys_sdram_1_size;
			}
		}
	}

	return phys_sdram_1_size;
}

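/*
 * Accumulate gd->ram_size from all owned memory regions that fall inside
 * the two DRAM banks.
 */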
int dram_init(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	u64 phys_sdram_1_start, phys_sdram_1_size;
	u64 phys_sdram_2_start, phys_sdram_2_size;
	int err;

	board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
			     &phys_sdram_2_start, &phys_sdram_2_size);

	end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
	end2 = (sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size;
	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small, skip it */
			if (start > end)
				continue;

			if (start >= phys_sdram_1_start && start <= end1) {
				if ((end + 1) <= end1)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end1 - start;
			} else if (start >= phys_sdram_2_start &&
				   start <= end2) {
				if ((end + 1) <= end2)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end2 - start;
			}
		}
	}

	/* If no owned memory region was found, use the default sizes */
	if (!gd->ram_size) {
		gd->ram_size = phys_sdram_1_size;
		gd->ram_size += phys_sdram_2_size;
	}
	return 0;
}

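/*
 * Keep gd->bd->bi_dram[] ordered by start address by moving the newly
 * added bank towards the front until it is in place.
 */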
static void dram_bank_sort(int current_bank)
{
	phys_addr_t start;
	phys_size_t size;

	while (current_bank > 0) {
		if (gd->bd->bi_dram[current_bank - 1].start >
		    gd->bd->bi_dram[current_bank].start) {
			start = gd->bd->bi_dram[current_bank - 1].start;
			size = gd->bd->bi_dram[current_bank - 1].size;

			gd->bd->bi_dram[current_bank - 1].start =
				gd->bd->bi_dram[current_bank].start;
			gd->bd->bi_dram[current_bank - 1].size =
				gd->bd->bi_dram[current_bank].size;

			gd->bd->bi_dram[current_bank].start = start;
			gd->bd->bi_dram[current_bank].size = size;
		}
		current_bank--;
	}
}

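/*
 * Fill gd->bd->bi_dram[] with the owned memory regions, clamped to the
 * DRAM banks and kept sorted by start address.
 */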
int dram_init_banksize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int i = 0;
	u64 phys_sdram_1_start, phys_sdram_1_size;
	u64 phys_sdram_2_start, phys_sdram_2_size;
	int err;

	board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
			     &phys_sdram_2_start, &phys_sdram_2_size);

	end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
	end2 = (sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size;
	for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			if (start > end) /* Memory region too small, skip it */
				continue;

			if (start >= phys_sdram_1_start && start <= end1) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end1)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end1 - start;

				dram_bank_sort(i);
				i++;
			} else if (start >= phys_sdram_2_start && start <= end2) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end2)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end2 - start;

				dram_bank_sort(i);
				i++;
			}
		}
	}

	/* If no owned memory region was found, use the default layout */
	if (!i) {
		gd->bd->bi_dram[0].start = phys_sdram_1_start;
		gd->bd->bi_dram[0].size = phys_sdram_1_size;
		gd->bd->bi_dram[1].start = phys_sdram_2_start;
		gd->bd->bi_dram[1].size = phys_sdram_2_size;
	}

	return 0;
}

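/*
 * MMU attributes for a block: normal, outer-shareable memory when the
 * address lies inside one of the DRAM banks, non-shareable device memory
 * with execution disabled otherwise.
 */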
static u64 get_block_attrs(sc_faddr_t addr_start)
{
	u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
		PTE_BLOCK_PXN | PTE_BLOCK_UXN;
	u64 phys_sdram_1_start, phys_sdram_1_size;
	u64 phys_sdram_2_start, phys_sdram_2_size;

	board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
			     &phys_sdram_2_start, &phys_sdram_2_size);

	if ((addr_start >= phys_sdram_1_start &&
	     addr_start <= ((sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size)) ||
	    (addr_start >= phys_sdram_2_start &&
	     addr_start <= ((sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size)))
		return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);

	return attr;
}

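/*
 * Size of the mapping for [addr_start, addr_end], clamped so it does not
 * extend past the end of the DRAM bank containing addr_start.
 */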
static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
{
	sc_faddr_t end1, end2;
	u64 phys_sdram_1_start, phys_sdram_1_size;
	u64 phys_sdram_2_start, phys_sdram_2_size;

	board_mem_get_layout(&phys_sdram_1_start, &phys_sdram_1_size,
			     &phys_sdram_2_start, &phys_sdram_2_size);

	end1 = (sc_faddr_t)phys_sdram_1_start + phys_sdram_1_size;
	end2 = (sc_faddr_t)phys_sdram_2_start + phys_sdram_2_size;

	if (addr_start >= phys_sdram_1_start && addr_start <= end1) {
		if ((addr_end + 1) > end1)
			return end1 - addr_start;
	} else if (addr_start >= phys_sdram_2_start && addr_start <= end2) {
		if ((addr_end + 1) > end2)
			return end2 - addr_start;
	}

	return (addr_end - addr_start + 1);
}

#define MAX_PTE_ENTRIES 512
#define MAX_MEM_MAP_REGIONS 16

static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = imx8_mem_map;

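/*
 * Build the MMU memory map: one device-memory window for peripherals plus
 * an entry per owned memory region, then enable the I- and D-caches.
 */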
void enable_caches(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end;
	int err, i;

	/* Create a map for register access from 0x1c000000 to 0x80000000 */
	imx8_mem_map[0].virt = 0x1c000000UL;
	imx8_mem_map[0].phys = 0x1c000000UL;
	imx8_mem_map[0].size = 0x64000000UL;
	imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	i = 1;
	for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			imx8_mem_map[i].virt = start;
			imx8_mem_map[i].phys = start;
			imx8_mem_map[i].size = get_block_size(start, end);
			imx8_mem_map[i].attrs = get_block_attrs(start);
			i++;
		}
	}

	if (i < MAX_MEM_MAP_REGIONS) {
		imx8_mem_map[i].size = 0;
		imx8_mem_map[i].attrs = 0;
	} else {
		puts("Error, need more MEM MAP REGIONS reserved\n");
		icache_enable();
		return;
	}

	for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
		debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
		      i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
		      imx8_mem_map[i].size, imx8_mem_map[i].attrs);
	}

	icache_enable();
	dcache_enable();
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;

	/*
	 * For each memory region, the max table size:
	 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
	 */
	size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
#endif

#if defined(CONFIG_IMX8QM)
#define FUSE_MAC0_WORD0 452
#define FUSE_MAC0_WORD1 453
#define FUSE_MAC1_WORD0 454
#define FUSE_MAC1_WORD1 455
#elif defined(CONFIG_IMX8QXP)
#define FUSE_MAC0_WORD0 708
#define FUSE_MAC0_WORD1 709
#define FUSE_MAC1_WORD0 710
#define FUSE_MAC1_WORD1 711
#endif

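/* Read the MAC address for interface @dev_id from the OTP fuse words */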
void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
{
	u32 word[2], val[2] = {};
	int i, ret;

	if (dev_id == 0) {
		word[0] = FUSE_MAC0_WORD0;
		word[1] = FUSE_MAC0_WORD1;
	} else {
		word[0] = FUSE_MAC1_WORD0;
		word[1] = FUSE_MAC1_WORD1;
	}

	for (i = 0; i < 2; i++) {
		ret = sc_misc_otp_fuse_read(-1, word[i], &val[i]);
		if (ret < 0)
			goto err;
	}

	mac[0] = val[0];
	mac[1] = val[0] >> 8;
	mac[2] = val[0] >> 16;
	mac[3] = val[0] >> 24;
	mac[4] = val[1];
	mac[5] = val[1] >> 8;

	debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
	      __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return;
err:
	printf("%s: fuse %d, err: %d\n", __func__, word[i], ret);
}

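/*
 * Query the SCU for the SoC ID and revision and pack them as
 * (soc_id << 12) | rev; return 0 on error.
 */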
u32 get_cpu_rev(void)
{
	u32 id = 0, rev = 0;
	int ret;

	ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
	if (ret)
		return 0;

	rev = (id >> 5) & 0xf;
	id = (id & 0x1f) + MXC_SOC_IMX8;  /* Dummy ID for chip */

	return (id << 12) | rev;
}

void board_boot_order(u32 *spl_boot_list)
{
	spl_boot_list[0] = spl_boot_device();

	if (spl_boot_list[0] == BOOT_DEVICE_SPI) {
		/* Check whether we own the flexspi0, if not, use NOR boot */
		if (!sc_rm_is_resource_owned(-1, SC_R_FSPI_0))
			spl_boot_list[0] = BOOT_DEVICE_NOR;
	}
}

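/*
 * Return true when a partition owning one of the Cortex-M4 cores has
 * already been started.
 */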
bool m4_parts_booted(void)
{
	sc_rm_pt_t m4_parts[2];
	int err;

	err = sc_rm_get_resource_owner(-1, SC_R_M4_0_PID0, &m4_parts[0]);
	if (err) {
		printf("%s get resource [%d] owner error: %d\n", __func__,
		       SC_R_M4_0_PID0, err);
		return false;
	}

	if (sc_pm_is_partition_started(-1, m4_parts[0]))
		return true;

	if (is_imx8qm()) {
		err = sc_rm_get_resource_owner(-1, SC_R_M4_1_PID0, &m4_parts[1]);
		if (err) {
			printf("%s get resource [%d] owner error: %d\n",
			       __func__, SC_R_M4_1_PID0, err);
			return false;
		}

		if (sc_pm_is_partition_started(-1, m4_parts[1]))
			return true;
	}

	return false;
}