// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2014 - 2015 Xilinx, Inc.
 * Michal Simek <michal.simek@xilinx.com>
 */

#include <common.h>
#include <init.h>
#include <time.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <asm/armv8/mmu.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <zynqmp_firmware.h>

#define ZYNQMP_SILICON_VER_MASK		0xF000
#define ZYNQMP_SILICON_VER_SHIFT	12

DECLARE_GLOBAL_DATA_PTR;

/*
 * Number of filled static entries and also the first empty
 * slot in zynqmp_mem_map.
 */
#define ZYNQMP_MEM_MAP_USED	4

#if !defined(CONFIG_ZYNQMP_NO_DDR)
#define DRAM_BANKS CONFIG_NR_DRAM_BANKS
#else
#define DRAM_BANKS 0
#endif

#if defined(CONFIG_DEFINE_TCM_OCM_MMAP)
#define TCM_MAP 1
#else
#define TCM_MAP 0
#endif

/* +1 for the terminating entry, which must stay empty */
#define ZYNQMP_MEM_MAP_MAX (ZYNQMP_MEM_MAP_USED + DRAM_BANKS + TCM_MAP + 1)
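
/*
 * Static entries map the device/peripheral address ranges as
 * strongly-ordered, non-shareable and execute-never. Normal-memory
 * entries for DRAM and the TCM/OCM region are appended at run time
 * by mem_map_fill().
 */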
static struct mm_region zynqmp_mem_map[ZYNQMP_MEM_MAP_MAX] = {
	{
		.virt = 0x80000000UL,
		.phys = 0x80000000UL,
		.size = 0x70000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		.virt = 0xf8000000UL,
		.phys = 0xf8000000UL,
		.size = 0x07e00000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		.virt = 0x400000000UL,
		.phys = 0x400000000UL,
		.size = 0x400000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		.virt = 0x1000000000UL,
		.phys = 0x1000000000UL,
		.size = 0xf000000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}
};
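
/*
 * mem_map_fill() - Append run-time entries to zynqmp_mem_map.
 *
 * Adds the TCM/OCM region (when CONFIG_DEFINE_TCM_OCM_MMAP is enabled)
 * and one normal, inner-shareable entry per populated DRAM bank, so it
 * expects gd->bd->bi_dram[] to be filled in already.
 */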
void mem_map_fill(void)
{
	int banks = ZYNQMP_MEM_MAP_USED;

#if defined(CONFIG_DEFINE_TCM_OCM_MMAP)
	zynqmp_mem_map[banks].virt = 0xffe00000UL;
	zynqmp_mem_map[banks].phys = 0xffe00000UL;
	zynqmp_mem_map[banks].size = 0x00200000UL;
	zynqmp_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				      PTE_BLOCK_INNER_SHARE;
	banks = banks + 1;
#endif

#if !defined(CONFIG_ZYNQMP_NO_DDR)
	for (int i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Zero size means no more DDR, so this is the end */
		if (!gd->bd->bi_dram[i].size)
			break;

		zynqmp_mem_map[banks].virt = gd->bd->bi_dram[i].start;
		zynqmp_mem_map[banks].phys = gd->bd->bi_dram[i].start;
		zynqmp_mem_map[banks].size = gd->bd->bi_dram[i].size;
		zynqmp_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					      PTE_BLOCK_INNER_SHARE;
		banks = banks + 1;
	}
#endif
}

struct mm_region *mem_map = zynqmp_mem_map;

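/*
 * get_page_table_size() - Size of the memory reserved for MMU page tables.
 *
 * Returns a fixed reservation of 0x14000 bytes (80 KiB).
 */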
u64 get_page_table_size(void)
{
	return 0x14000;
}

#if defined(CONFIG_SYS_MEM_RSVD_FOR_MMU) || defined(CONFIG_DEFINE_TCM_OCM_MMAP)
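/*
 * tcm_init() - Initialize the TCM in the given mode.
 * @mode: configuration mode passed to initialize_tcm() (e.g. TCM_LOCK).
 *
 * Initialization overwrites existing TCM content, so the whole region
 * is cleared afterwards.
 */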
void tcm_init(u8 mode)
{
	puts("WARNING: Initializing TCM overwrites TCM content\n");
	initialize_tcm(mode);
	memset((void *)ZYNQMP_TCM_BASE_ADDR, 0, ZYNQMP_TCM_SIZE);
}
#endif

#ifdef CONFIG_SYS_MEM_RSVD_FOR_MMU
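/*
 * arm_reserve_mmu() - Place the MMU page tables in the TCM.
 *
 * Initializes the TCM in TCM_LOCK mode and points the page table
 * reservation (gd->arch.tlb_addr/tlb_size) at it.
 */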
int arm_reserve_mmu(void)
{
	tcm_init(TCM_LOCK);
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->arch.tlb_addr = ZYNQMP_TCM_BASE_ADDR;

	return 0;
}
#endif

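/*
 * zynqmp_get_silicon_version_secure() - Read the silicon version from the
 * CSU version register. Used only when running at EL3 (see
 * zynqmp_get_silicon_version()).
 */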
static unsigned int zynqmp_get_silicon_version_secure(void)
{
	u32 ver;

	ver = readl(&csu_base->version);
	ver &= ZYNQMP_SILICON_VER_MASK;
	ver >>= ZYNQMP_SILICON_VER_SHIFT;

	return ver;
}

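/*
 * zynqmp_get_silicon_version() - Determine the platform the code runs on.
 *
 * At EL3 the CSU version register is read directly. At lower exception
 * levels the timebase clock is used as a heuristic: 50 MHz indicates QEMU,
 * anything else is treated as real silicon.
 */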
unsigned int zynqmp_get_silicon_version(void)
{
	if (current_el() == 3)
		return zynqmp_get_silicon_version_secure();

	gd->cpu_clk = get_tbclk();

	switch (gd->cpu_clk) {
	case 50000000:
		return ZYNQMP_CSU_VERSION_QEMU;
	}

	return ZYNQMP_CSU_VERSION_SILICON;
}

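/* Read-modify-write of the bits selected by @mask, written directly with writel() */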
static int zynqmp_mmio_rawwrite(const u32 address,
				const u32 mask,
				const u32 value)
{
	u32 data;
	u32 value_local = value;
	int ret;

	ret = zynqmp_mmio_read(address, &data);
	if (ret)
		return ret;

	data &= ~mask;
	value_local &= mask;
	value_local |= data;
	writel(value_local, (ulong)address);
	return 0;
}

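/* Direct register read for contexts that do not go through the firmware */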
static int zynqmp_mmio_rawread(const u32 address, u32 *value)
{
	*value = readl((ulong)address);
	return 0;
}

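/*
 * zynqmp_mmio_write() - Masked MMIO write.
 *
 * In SPL or at EL3 the register is accessed directly; otherwise the write
 * is forwarded to the firmware via PM_MMIO_WRITE (when
 * CONFIG_ZYNQMP_FIRMWARE is enabled).
 */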
int zynqmp_mmio_write(const u32 address,
		      const u32 mask,
		      const u32 value)
{
	if (IS_ENABLED(CONFIG_SPL_BUILD) || current_el() == 3)
		return zynqmp_mmio_rawwrite(address, mask, value);
#if defined(CONFIG_ZYNQMP_FIRMWARE)
	else
		return xilinx_pm_request(PM_MMIO_WRITE, address, mask,
					 value, 0, NULL);
#endif

	return -EINVAL;
}

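/*
 * zynqmp_mmio_read() - MMIO read, either directly or via the firmware's
 * PM_MMIO_READ call, mirroring the logic of zynqmp_mmio_write().
 */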
int zynqmp_mmio_read(const u32 address, u32 *value)
{
	int ret = -EINVAL;

	if (!value)
		return ret;

	if (IS_ENABLED(CONFIG_SPL_BUILD) || current_el() == 3) {
		ret = zynqmp_mmio_rawread(address, value);
	}
#if defined(CONFIG_ZYNQMP_FIRMWARE)
	else {
		u32 ret_payload[PAYLOAD_ARG_CNT];

		ret = xilinx_pm_request(PM_MMIO_READ, address, 0, 0,
					0, ret_payload);
		*value = ret_payload[1];
	}
#endif

	return ret;
}