xref: /linux/arch/loongarch/mm/init.c (revision d642ef71)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mmzone.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

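/*
 * Copy a page of user memory from @from to @to through temporary
 * kmap_local_page() mappings. The @vaddr/@vma arguments of the generic
 * interface are not needed here, as no extra cache maintenance is
 * required for the copy on LoongArch.
 */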
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_local_page(to);
	vfrom = kmap_local_page(from);
	copy_page(vto, vfrom);
	/* kmap_local mappings are stack-like: unmap in reverse order */
	kunmap_local(vfrom);
	kunmap_local(vto);
	/* Make sure this page's new contents are visible to other CPUs before it is used */
	smp_wmb();
}

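/*
 * A PFN counts as RAM if it lies in a memblock memory region and is not
 * marked reserved.
 */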
int __ref page_is_ram(unsigned long pfn)
{
	unsigned long addr = PFN_PHYS(pfn);

	return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}

#ifndef CONFIG_NUMA
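/*
 * Set the upper PFN bound of each enabled zone and let the core build
 * the zone free lists; free_area_init() derives the zone ranges from
 * these limits.
 */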
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

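/*
 * Record the highest mapped PFN and the top of low memory, then hand
 * all memblock-managed pages over to the buddy allocator.
 */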
void __init mem_init(void)
{
	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	memblock_free_all();
}
#endif /* !CONFIG_NUMA */

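/*
 * Free the kernel's .init sections back to the page allocator, poisoning
 * the freed memory first so that stale references are easier to spot.
 */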
void __ref free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_MEMORY_HOTPLUG
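/*
 * Hot-add @size bytes of memory at physical address @start to node @nid
 * by creating the corresponding struct pages via __add_pages().
 */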
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);

	if (ret)
		pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
				__func__, ret);

	return ret;
}

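/*
 * Inverse of arch_add_memory(): tear down the struct pages that back the
 * hot-removed range.
 */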
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	__remove_pages(start_pfn, nr_pages, altmap);
}

#ifdef CONFIG_NUMA
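/*
 * Translate a physical address into the NUMA node it belongs to, for the
 * memory hotplug code.
 */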
int memory_add_physaddr_to_nid(u64 start)
{
	return pa_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif /* CONFIG_NUMA */
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
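/*
 * Called by the generic vmemmap code to install a huge-page mapping:
 * build a kernel PMD entry for the block at @p and mark it huge and
 * global.
 */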
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_t entry;

	entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
	pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
	set_pmd_at(&init_mm, addr, pmd, entry);
}

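/*
 * Report whether @pmd already holds a huge-page mapping; if so, let
 * vmemmap_verify() check the covered range while we are here.
 */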
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	int huge = pmd_val(*pmd) & _PAGE_HUGE;

	if (huge)
		vmemmap_verify((pte_t *)pmd, node, addr, next);

	return huge;
}

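/*
 * Populate the vmemmap for [@start, @end). With only two page-table
 * levels there are no huge PMDs to map with, so fall back to base pages;
 * otherwise prefer huge-page mappings.
 */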
int __meminit vmemmap_populate(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap)
{
#if CONFIG_PGTABLE_LEVELS == 2
	return vmemmap_populate_basepages(start, end, node, NULL);
#else
	return vmemmap_populate_hugepages(start, end, node, NULL);
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
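/*
 * Freeing vmemmap backing memory is not implemented on LoongArch, so
 * hot-removed ranges keep their (now unused) vmemmap pages.
 */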
void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
{
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

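/*
 * Walk the kernel page tables for @addr, allocating any missing
 * intermediate tables from memblock (hence __init: this must only run
 * during early boot), and return the kernel PTE slot that maps @addr.
 */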
pte_t * __init populate_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	/* Allocate and hook up a PUD table if this P4D slot is empty */
	if (p4d_none(*p4d)) {
		pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pud)
			panic("%s: Failed to allocate memory\n", __func__);
		p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
		pud_init(pud);
#endif
	}

	/* Likewise for the PMD table below it */
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate memory\n", __func__);
		pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
		pmd_init(pmd);
#endif
	}

	/* And finally for the leaf PTE table */
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate memory\n", __func__);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}

	return pte_offset_kernel(pmd, addr);
}

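/*
 * Bind fixmap slot @idx to physical address @phys with protection
 * @flags. A zero @flags value instead unmaps the slot and flushes its
 * stale TLB entry; the backing page table is created on demand via
 * populate_kernel_pte().
 */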
void __init __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = populate_kernel_pte(addr);
	if (!pte_none(*ptep)) {
		pte_ERROR(*ptep);
		return;
	}

	if (pgprot_val(flags))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);