/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_4K_H

#define H_PTE_INDEX_SIZE  9  // size: 8B << 9 = 4KB, maps: 2^9 x   4KB =   2MB
#define H_PMD_INDEX_SIZE  7  // size: 8B << 7 = 1KB, maps: 2^7 x   2MB = 256MB
#define H_PUD_INDEX_SIZE  9  // size: 8B << 9 = 4KB, maps: 2^9 x 256MB = 128GB
#define H_PGD_INDEX_SIZE  9  // size: 8B << 9 = 4KB, maps: 2^9 x 128GB =  64TB
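/*
 * Worked check of the geometry above: a 4K page gives 12 offset bits, and
 * the four table levels add 9 + 7 + 9 + 9 = 34 index bits, so a fully
 * populated tree resolves 12 + 34 = 46 bits of effective address,
 * i.e. 2^46 = 64TB. That 46 is where MAX_EA_BITS_PER_CONTEXT and
 * H_MAX_PHYSMEM_BITS below come from.
 */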

/*
 * Each context is 512TB, but on 4K we restrict our max TASK size to 64TB,
 * hence we also limit the max EA bits to 46 (64TB).
 */
#define MAX_EA_BITS_PER_CONTEXT		46

/*
 * Our page tables limit us to 64TB. For 64TB of physical memory we only
 * need 64GB of vmemmap space. To better support sparse memory layouts, we
 * use a 61TB linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of
 * vmemmap.
 */
#define REGION_SHIFT		(40)
#define H_KERN_MAP_SIZE		(ASM_CONST(1) << REGION_SHIFT)
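/*
 * With REGION_SHIFT = 40, H_KERN_MAP_SIZE is 1UL << 40 = 1TB, sizing each
 * of the vmalloc, I/O and vmemmap regions, and 61TB + 1TB + 1TB + 1TB
 * accounts for the full 64TB reachable through the page tables above.
 */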

/*
 * Limits the linear mapping range
 */
#define H_MAX_PHYSMEM_BITS	46

/*
 * Define the address range of the kernel non-linear virtual area (61TB)
 */
#define H_KERN_VIRT_START	ASM_CONST(0xc0003d0000000000)
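/*
 * Sanity check on the constant: the kernel region starts at
 * 0xc000000000000000 and the non-linear area begins right after the 61TB
 * linear map, i.e. at 0xc000000000000000 + (61UL << 40). Since 61 is 0x3d,
 * that is 0xc0003d0000000000, matching the value above.
 */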

#ifndef __ASSEMBLY__
#define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << H_PMD_INDEX_SIZE)
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << H_PUD_INDEX_SIZE)
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << H_PGD_INDEX_SIZE)

#define H_PAGE_F_GIX_SHIFT	_PAGE_PA_MAX
#define H_PAGE_F_SECOND		_RPAGE_PKEY_BIT0 /* HPTE is in 2ndary HPTEG */
#define H_PAGE_F_GIX		(_RPAGE_RPN43 | _RPAGE_RPN42 | _RPAGE_RPN41)
#define H_PAGE_BUSY		_RPAGE_RSV1
#define H_PAGE_HASHPTE		_RPAGE_PKEY_BIT4

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS	(H_PAGE_BUSY | H_PAGE_HASHPTE | \
			 H_PAGE_F_SECOND | H_PAGE_F_GIX)
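/*
 * Background note: H_PAGE_F_SECOND plus the 3-bit H_PAGE_F_GIX together
 * record which hash-table slot (primary or secondary HPTE group, 8 slots
 * each) currently backs this PTE, H_PAGE_HASHPTE records that an HPTE
 * exists at all, and H_PAGE_BUSY serializes concurrent updates. Preserving
 * these bits across PTE updates lets the fault path find and update the
 * existing HPTE instead of inserting a duplicate.
 */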
/*
 * Not supported with the 4K Linux page size
 */
#define H_PAGE_4K_PFN	0x0
#define H_PAGE_THP_HUGE 0x0
#define H_PAGE_COMBO	0x0

/* 8 bytes per PTE entry */
#define H_PTE_FRAG_SIZE_SHIFT	(H_PTE_INDEX_SIZE + 3)
#define H_PTE_FRAG_NR		(PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
#define H_PMD_FRAG_SIZE_SHIFT	(H_PMD_INDEX_SIZE + 3)
#define H_PMD_FRAG_NR		(PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
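/*
 * Worked out for the 4K case: a PTE fragment is 1 << (9 + 3) = 4KB, i.e. a
 * whole page, so H_PTE_FRAG_NR is 1; a PMD fragment is 1 << (7 + 3) = 1KB,
 * so four PMD tables share one 4KB page (H_PMD_FRAG_NR = 4).
 */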

/* memory key bits, only 8 keys supported */
#define H_PTE_PKEY_BIT4	0
#define H_PTE_PKEY_BIT3	0
#define H_PTE_PKEY_BIT2	_RPAGE_PKEY_BIT3
#define H_PTE_PKEY_BIT1	_RPAGE_PKEY_BIT2
#define H_PTE_PKEY_BIT0	_RPAGE_PKEY_BIT1
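/*
 * Only three of the five pkey PTE bits are available here: _RPAGE_PKEY_BIT0
 * and _RPAGE_PKEY_BIT4 are reused above as H_PAGE_F_SECOND and
 * H_PAGE_HASHPTE. Hence 2^3 = 8 usable memory protection keys on 4K hash.
 */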

/*
 * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
 */
#define remap_4k_pfn(vma, addr, pfn, prot)	\
	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
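/*
 * Illustrative use, e.g. from a hypothetical driver's ->mmap() handler
 * (sketch only; foo_mmap() and foo_dev_pfn() are made-up names, and error
 * handling is elided):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = foo_dev_pfn(file);	// hypothetical helper
 *
 *		return remap_4k_pfn(vma, vma->vm_start, pfn,
 *				    vma->vm_page_prot);
 *	}
 */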

/*
 * With 4K page size the real_pte machinery is all nops.
 */
#define __real_pte(e, p, o)	((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_to_hidx(r, index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)
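/*
 * Typical caller pattern (a sketch, mirroring how the hash MMU flush code
 * uses these macros): with 4K pages the "loop" visits exactly one subpage:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hash = hpt_hash(vpn, shift, ssize);
 *		// ... invalidate or update the HPTE for this subpage ...
 *	} pte_iterate_hashed_end();
 */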

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

/*
 * 4K PTE format is different from 64K PTE format. Saving the hash slot is
 * just a matter of returning the PTE bits that need to be modified. On 64K
 * PTEs things are a little more involved and hence need many more
 * parameters to accomplish the same. However, we want to abstract this out
 * from the caller by keeping the prototype consistent across the two
 * formats.
 */
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
					 unsigned int subpg_index,
					 unsigned long hidx, int offset)
{
	return (hidx << H_PAGE_F_GIX_SHIFT) &
		(H_PAGE_F_SECOND | H_PAGE_F_GIX);
}
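/*
 * Informal reading of the above: hidx is the 4-bit hash slot number, with
 * bit 3 selecting the secondary HPTE group and bits 0-2 the slot within
 * the group. Shifting by H_PAGE_F_GIX_SHIFT lines bit 3 up with
 * H_PAGE_F_SECOND and bits 0-2 up with H_PAGE_F_GIX, the mask discards
 * everything else, and the caller can simply OR the result into the PTE.
 */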

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

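/*
 * Transparent hugepages are not supported with the 4K hash configuration;
 * the stubs below only exist to keep the generic code building, and BUG()
 * because they must never be reached at runtime.
 */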
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	BUG();
	return NULL;
}

static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	BUG();
	return 0;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	BUG();
	return 0;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	BUG();
}

static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return 0;
}

static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
	BUG();
	return pmd;
}

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
					       unsigned long addr, pmd_t *pmdp,
					       unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
				       unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					     pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
					   unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
{
	BUG();
	return pmd;
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */