xref: /qemu/target/riscv/pmp.c (revision 526d7984)
/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * PMP (Physical Memory Protection) is as-of-yet unused and needs testing.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "qemu-common.h"

#ifndef CONFIG_USER_ONLY

#define RISCV_DEBUG_PMP 0
#define PMP_DEBUG(fmt, ...)                                                    \
    do {                                                                       \
        if (RISCV_DEBUG_PMP) {                                                 \
            qemu_log_mask(LOG_TRACE, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\
        }                                                                      \
    } while (0)

static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
    uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);

/*
 * Accessor method to extract address matching type 'a field' from cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}
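
/*
 * Worked example (illustrative value, not from the original source):
 * cfg = 0x1f = 0b00011111 has R, W and X set and bits [4:3] = 0b11,
 * so pmp_get_a_field() returns PMP_AMATCH_NAPOT (3).
 */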

/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    /*
     * In TOR mode, need to check the lock bit of the next pmp
     * (if there is a next)
     */
    const uint8_t a_field =
        pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg);
    if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) &&
         (PMP_AMATCH_TOR == a_field)) {
        return 1;
    }

    return 0;
}
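
/*
 * Worked example (illustrative, not from the original source): if pmp1cfg
 * has PMP_LOCK set and its A field is PMP_AMATCH_TOR, pmp_is_locked(env, 0)
 * also returns 1, since pmpaddr0 forms the bottom of entry 1's TOR range
 * and must not be modified while that entry is locked.
 */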

/*
 * Count the number of active rules.
 */
static inline uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}

/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Bounds-checks the index and honours the relevant lock bit.
 */
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        if (!pmp_is_locked(env, pmp_index)) {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule(env, pmp_index);
        } else {
            PMP_DEBUG("ignoring write - locked");
        }
    } else {
        PMP_DEBUG("ignoring write - out of bounds");
    }
}

static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     * 1111...1111   Reserved
     */
    if (a == -1) {
        *sa = 0u;
        *ea = -1;
        return;
    } else {
        target_ulong t1 = ctz64(~a);
        target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2;
        target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1;
        *sa = base;
        *ea = base + range;
    }
}
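
/*
 * Worked example (illustrative value, not from the original source):
 * pmpaddr = 0x2001 gives ctz64(~a) = 1, so
 *   base  = (0x2001 & ~0x1) << 2 = 0x8000
 *   range = (1 << (1 + 3)) - 1   = 0xf
 * i.e. a 16-byte NAPOT region covering 0x8000..0x800f inclusive.
 */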

/*
 * Convert the cfg/addr register values into simple 'sa' (start address) and
 * 'ea' (end address) values. This function is called relatively infrequently,
 * whereas the check that an address is within a pmp rule is called often, so
 * keep that check cheap.
 */
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
{
    int i;

    env->pmp_state.num_rules = 0;

    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    target_ulong sa = 0u;
    target_ulong ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u; /* NA4 covers the four bytes starting at sa */
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}
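
/*
 * Worked example (illustrative values, not from the original source):
 * with pmpaddr0 = 0x20000, pmpaddr1 = 0x20400 and pmp1cfg.A = PMP_AMATCH_TOR,
 * entry 1 decodes to sa = 0x20000 << 2 = 0x80000 and
 * ea = (0x20400 << 2) - 1 = 0x80fff, i.e. a 4 KiB TOR region.
 */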

static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa)
        && (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}


/*
 * Public Interface
 */

/*
 * Check if the address has required RWX privs to complete desired operation
 */
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
    target_ulong size, pmp_priv_t privs)
{
    int i = 0;
    int ret = -1;
    target_ulong s = 0;
    target_ulong e = 0;
    pmp_priv_t allowed_privs = 0;

    /* Short cut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return true;
    }

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + size - 1); /* ea is inclusive */

        /* partially inside */
        if ((s + e) == 1) {
            PMP_DEBUG("pmp violation - access is partially inside");
            ret = 0;
            break;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if ((s + e) == 2) {
            if (PMP_AMATCH_OFF == a_field) {
                /* Disabled entries match no addresses; keep searching */
                continue;
            }

            allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
            if ((env->priv != PRV_M) || pmp_is_locked(env, i)) {
                allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
            }

            if ((privs & allowed_privs) == privs) {
                ret = 1;
                break;
            } else {
                ret = 0;
                break;
            }
        }
    }

    /* No rule matched */
    if (ret == -1) {
        if (env->priv == PRV_M) {
            /*
             * Privileged spec v1.10 states if no PMP entry matches an
             * M-Mode access, the access succeeds
             */
            ret = 1;
        } else {
            /*
             * Other modes are not allowed to succeed if they don't match a
             * rule, but there are rules. We've checked for no rule earlier
             * in this function.
             */
            ret = 0;
        }
    }

    return ret == 1 ? true : false;
}
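
/*
 * Worked example (illustrative, not from the original source): for an S-mode
 * access that falls entirely inside an active entry whose cfg grants only
 * PMP_READ, allowed_privs is masked down to PMP_READ, so a PMP_READ request
 * succeeds while a PMP_WRITE or PMP_EXEC request is refused.
 */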


/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
    target_ulong val)
{
    int i;
    uint8_t cfg_val;

    PMP_DEBUG("hart " TARGET_FMT_ld ": reg%d, val: 0x" TARGET_FMT_lx,
        env->mhartid, reg_index, val);

    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
        PMP_DEBUG("ignoring write - incorrect address");
        return;
    }

    for (i = 0; i < sizeof(target_ulong); i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        pmp_write_cfg(env, (reg_index * sizeof(target_ulong)) + i,
            cfg_val);
    }
}
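
/*
 * Worked example (illustrative values, not from the original source):
 * on RV64 a write of val = 0x1f0f to pmpcfg0 (reg_index 0) stores 0x0f into
 * pmp0cfg (TOR, R/W/X set) and 0x1f into pmp1cfg (NAPOT, R/W/X set), while
 * pmp2cfg..pmp7cfg are written with 0x00 (OFF).
 */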


/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;

    for (i = 0; i < sizeof(target_ulong); i++) {
        val = pmp_read_cfg(env, (reg_index * sizeof(target_ulong)) + i);
        cfg_val |= (val << (i * 8));
    }

    PMP_DEBUG("hart " TARGET_FMT_ld ": reg%d, val: 0x" TARGET_FMT_lx,
        env->mhartid, reg_index, cfg_val);

    return cfg_val;
}


/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
    target_ulong val)
{
    PMP_DEBUG("hart " TARGET_FMT_ld ": addr%d, val: 0x" TARGET_FMT_lx,
        env->mhartid, addr_index, val);

    if (addr_index < MAX_RISCV_PMPS) {
        if (!pmp_is_locked(env, addr_index)) {
            env->pmp_state.pmp[addr_index].addr_reg = val;
            pmp_update_rule(env, addr_index);
        } else {
            PMP_DEBUG("ignoring write - locked");
        }
    } else {
        PMP_DEBUG("ignoring write - out of bounds");
    }
}


/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    if (addr_index < MAX_RISCV_PMPS) {
        PMP_DEBUG("hart " TARGET_FMT_ld ": addr%d, val: 0x" TARGET_FMT_lx,
            env->mhartid, addr_index,
            env->pmp_state.pmp[addr_index].addr_reg);
        return env->pmp_state.pmp[addr_index].addr_reg;
    } else {
        PMP_DEBUG("ignoring read - out of bounds");
        return 0;
    }
}

#endif