xref: /linux/arch/powerpc/include/asm/fixmap.h (revision 9a6b55ac)
/*
 * fixmap.h: compile-time virtual memory allocation
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 Ingo Molnar
 *
 * Copyright 2008 Freescale Semiconductor Inc.
 *   Port to powerpc added by Kumar Gala
 */

#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H

#ifndef __ASSEMBLY__
#include <linux/sizes.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif

#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP	(KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
#endif

/*
 * Here we define all the compile-time 'special' virtual
 * addresses. The point is to have a constant address at
 * compile time, but to set the physical address only
 * in the boot process. We allocate these special addresses
 * from the end of virtual memory (0xfffff000) backwards.
 * This also lets us do fail-safe vmalloc(): we can guarantee
 * that these special addresses and vmalloc()-ed addresses
 * never overlap.
 *
 * These 'compile-time allocated' memory buffers are
 * fixed-size 4k pages (or larger if used with an increment
 * higher than 1). Use set_fixmap(idx, phys) to associate
 * physical memory with fixmap indices.
 *
 * TLB entries of such buffers will not be flushed across
 * task switches.
 */
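/*
 * A minimal illustration of the layout (assuming 4k pages and
 * CONFIG_KASAN disabled): the generic helpers in asm-generic/fixmap.h
 * define __fix_to_virt(x) as FIXADDR_TOP - ((x) << PAGE_SHIFT), so
 * index 0 gets the topmost slot and higher indices grow downwards:
 *
 *	fix_to_virt(FIX_HOLE)     == FIXADDR_TOP          == 0xfffff000
 *	fix_to_virt(FIX_HOLE + 1) == FIXADDR_TOP - 0x1000 == 0xffffe000
 */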
enum fixed_addresses {
	FIX_HOLE,
	/* reserve the top 128K for early debugging purposes */
	FIX_EARLY_DEBUG_TOP = FIX_HOLE,
	FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+((128*1024)/PAGE_SIZE)-1,
#ifdef CONFIG_HIGHMEM
	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
#ifdef CONFIG_PPC_8xx
	/* For IMMR we need an aligned 512K area */
#define FIX_IMMR_SIZE	(512 * 1024 / PAGE_SIZE)
	FIX_IMMR_START,
	FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
		       FIX_IMMR_SIZE,
#endif
#ifdef CONFIG_PPC_83xx
	/* For IMMR we need an aligned 2M area */
#define FIX_IMMR_SIZE	(SZ_2M / PAGE_SIZE)
	FIX_IMMR_START,
	FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
		       FIX_IMMR_SIZE,
#endif
	/* FIX_PCIE_MCFG, */
	__end_of_permanent_fixed_addresses,

#define NR_FIX_BTMAPS		(SZ_256K / PAGE_SIZE)
#define FIX_BTMAPS_SLOTS	16
#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)

	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
	__end_of_fixed_addresses
};
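
/*
 * Worked example for the FIX_IMMR_BASE arithmetic above (a sketch that
 * assumes the 512K CONFIG_PPC_8xx case, 4k pages and CONFIG_KASAN
 * disabled; the value of FIX_IMMR_START is hypothetical): FIX_IMMR_SIZE
 * is then 128 slots, and __ALIGN_MASK() rounds FIX_IMMR_START up to the
 * next multiple of 128. If FIX_IMMR_START were 32:
 *
 *	FIX_IMMR_BASE      = __ALIGN_MASK(32, 127) - 1 + 128
 *	                   = 128 - 1 + 128 = 255
 *	__fix_to_virt(255) = 0xfffff000 - (255 << 12) = 0xfff00000
 *
 * FIX_IMMR_BASE + 1 is always a multiple of 128 slots, so the lowest
 * address of the reserved range is 512K aligned, giving the IMMR
 * mapping the aligned area the comments above ask for.
 */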

#define __FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE)

#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
#define FIXMAP_PAGE_IO	PAGE_KERNEL_NCG

#include <asm-generic/fixmap.h>

/*
 * Install the mapping for a fixmap slot. Out-of-range indices are
 * rejected at build time when constant, otherwise they trigger a
 * warning and the request is ignored.
 */
static inline void __set_fixmap(enum fixed_addresses idx,
				phys_addr_t phys, pgprot_t flags)
{
	if (__builtin_constant_p(idx))
		BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
	else if (WARN_ON(idx >= __end_of_fixed_addresses))
		return;

	map_kernel_page(__fix_to_virt(idx), phys, flags);
}

#define __early_set_fixmap	__set_fixmap
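
/*
 * Usage sketch for the generic wrappers that asm-generic/fixmap.h
 * builds on top of __set_fixmap() ("FIX_FOO" and "paddr" are
 * hypothetical placeholders, not definitions from this file):
 *
 *	set_fixmap(FIX_FOO, paddr);		map with the default protection
 *	ptr = (void *)fix_to_virt(FIX_FOO);	compile-time constant address
 *	set_fixmap_io(FIX_FOO, paddr);		map with FIXMAP_PAGE_IO above
 *	clear_fixmap(FIX_FOO);			tear the mapping down again
 */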

#endif /* !__ASSEMBLY__ */
#endif