/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2020 Oxide Computer Company
 */

/*
 * Memory ranges are represented with an RB tree. On insertion, the range
 * is checked for overlaps. On lookup, the key has the same base and limit
 * so it can be searched within the range.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/tree.h>
#include <machine/vmm.h>

#include <assert.h>
#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#include "mem.h"

struct mmio_rb_range {
	RB_ENTRY(mmio_rb_range)	mr_link;	/* RB tree links */
	struct mem_range	mr_param;
	uint64_t		mr_base;
	uint64_t		mr_end;
};

struct mmio_rb_tree;
RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;

/*
 * Per-vCPU cache. Since most accesses from a vCPU will be to
 * consecutive addresses in a range, it makes sense to cache the
 * result of a lookup.
 */
static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];

static pthread_rwlock_t mmio_rwlock;
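/*
 * Illustrative sketch (not compiled): the comparator below treats any two
 * overlapping ranges as "equal", so a lookup key whose base and end are
 * both set to the faulting address compares equal to the registered range
 * that contains it, and RB_FIND() effectively performs a containment
 * search.  The addresses and the helper name here are hypothetical.
 */
#if 0
static void
mmio_rb_lookup_sketch(void)
{
	struct mmio_rb_range range, key;

	/* A hypothetical 4KB MMIO range registered at 0xc0000000. */
	range.mr_base = 0xc0000000;
	range.mr_end = 0xc0000fff;

	/* Point lookup for an access at 0xc0000010 within that range. */
	key.mr_base = key.mr_end = 0xc0000010;

	/*
	 * mmio_rb_range_compare(&key, &range) returns 0 here: the key
	 * neither ends before the range starts nor starts after the range
	 * ends, so RB_FIND() would return 'range' for this key.
	 */
}
#endif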
static int
mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
{
	if (a->mr_end < b->mr_base)
		return (-1);
	else if (a->mr_base > b->mr_end)
		return (1);
	return (0);
}

static int
mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
    struct mmio_rb_range **entry)
{
	struct mmio_rb_range find, *res;

	find.mr_base = find.mr_end = addr;

	res = RB_FIND(mmio_rb_tree, rbt, &find);

	if (res != NULL) {
		*entry = res;
		return (0);
	}

	return (ENOENT);
}

static int
mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
{
	struct mmio_rb_range *overlap;

	overlap = RB_INSERT(mmio_rb_tree, rbt, new);

	if (overlap != NULL) {
#ifdef RB_DEBUG
		printf("overlap detected: new %lx:%lx, tree %lx:%lx, '%s' "
		    "claims region already claimed for '%s'\n",
		    new->mr_base, new->mr_end,
		    overlap->mr_base, overlap->mr_end,
		    new->mr_param.name, overlap->mr_param.name);
#endif

		return (EEXIST);
	}

	return (0);
}

#if 0
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	pthread_rwlock_rdlock(&mmio_rwlock);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		    np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif

RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);

typedef int (mem_cb_t)(struct vmctx *ctx, int vcpu, uint64_t gpa,
    struct mem_range *mr, void *arg);

static int
mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
	    rval, mr->arg1, mr->arg2);
	return (error);
}

static int
mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
	int error;
	struct mem_range *mr = arg;

	error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
	    &wval, mr->arg1, mr->arg2);
	return (error);
}

static int
access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb,
    void *arg)
{
	struct mmio_rb_range *entry;
	int err, perror, immutable;

	pthread_rwlock_rdlock(&mmio_rwlock);
	/*
	 * First check the per-vCPU cache
	 */
	if (mmio_hint[vcpu] &&
	    paddr >= mmio_hint[vcpu]->mr_base &&
	    paddr <= mmio_hint[vcpu]->mr_end) {
		entry = mmio_hint[vcpu];
	} else
		entry = NULL;

	if (entry == NULL) {
		if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
			/* Update the per-vCPU cache */
			mmio_hint[vcpu] = entry;
		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
			perror = pthread_rwlock_unlock(&mmio_rwlock);
			assert(perror == 0);
			return (ESRCH);
		}
	}

	assert(entry != NULL);

	/*
	 * An 'immutable' memory range is guaranteed to be never removed
	 * so there is no need to hold 'mmio_rwlock' while calling the
	 * handler.
	 *
	 * XXX writes to the PCIR_COMMAND register can cause register_mem()
	 * to be called. If the guest is using PCI extended config space
	 * to modify the PCIR_COMMAND register then register_mem() can
	 * deadlock on 'mmio_rwlock'. However by registering the extended
	 * config space window as 'immutable' the deadlock can be avoided.
	 */
	immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE);
	if (immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	err = cb(ctx, vcpu, paddr, &entry->mr_param, arg);

	if (!immutable) {
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
	}

	return (err);
}
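/*
 * To make the MEM_F_IMMUTABLE note in access_memory() concrete, the
 * deadlock it avoids would look roughly like this (a hypothetical call
 * chain, not code from this file):
 *
 *   vCPU writes PCIR_COMMAND through the extended config space window
 *     -> access_memory() takes 'mmio_rwlock' as a reader
 *       -> the config space handler reacts to the decode-enable change by
 *          calling register_mem()/unregister_mem() for a BAR
 *         -> register_mem_int() tries to take 'mmio_rwlock' as a writer
 *            while this thread still holds it as a reader.
 *
 * Marking the config window immutable drops the read lock before the
 * handler runs, so the nested write lock can be acquired.
 */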
static int
emulate_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct vm_mmio *mmio;
	int err = 0;

	mmio = arg;

	if (mmio->read != 0) {
		err = mem_read(ctx, vcpu, paddr, &mmio->data, mmio->bytes, mr);
	} else {
		err = mem_write(ctx, vcpu, paddr, mmio->data, mmio->bytes, mr);
	}

	return (err);
}

int
emulate_mem(struct vmctx *ctx, int vcpu, struct vm_mmio *mmio)
{
	return (access_memory(ctx, vcpu, mmio->gpa, emulate_mem_cb, mmio));
}

struct rw_mem_args {
	uint64_t *val;
	int size;
	int operation;
};

static int
rw_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
    void *arg)
{
	struct rw_mem_args *rma;

	rma = arg;
	return (mr->handler(ctx, vcpu, rma->operation, paddr, rma->size,
	    rma->val, mr->arg1, mr->arg2));
}

int
read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size)
{
	struct rw_mem_args rma;

	rma.val = rval;
	rma.size = size;
	rma.operation = MEM_F_READ;
	return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
}

int
write_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size)
{
	struct rw_mem_args rma;

	rma.val = &wval;
	rma.size = size;
	rma.operation = MEM_F_WRITE;
	return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
}

static int
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
{
	struct mmio_rb_range *entry, *mrp;
	int err, perror;

	err = 0;

	mrp = malloc(sizeof(struct mmio_rb_range));
	if (mrp == NULL) {
		warn("%s: couldn't allocate memory for mrp\n",
		    __func__);
		err = ENOMEM;
	} else {
		mrp->mr_param = *memp;
		mrp->mr_base = memp->base;
		mrp->mr_end = memp->base + memp->size - 1;
		pthread_rwlock_wrlock(&mmio_rwlock);
		if (mmio_rb_lookup(rbt, memp->base, &entry) != 0)
			err = mmio_rb_add(rbt, mrp);
		perror = pthread_rwlock_unlock(&mmio_rwlock);
		assert(perror == 0);
		if (err)
			free(mrp);
	}

	return (err);
}

int
register_mem(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_root, memp));
}

int
register_mem_fallback(struct mem_range *memp)
{

	return (register_mem_int(&mmio_rb_fallback, memp));
}
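/*
 * Note on the two trees: access_memory() searches 'mmio_rb_root' first and
 * consults 'mmio_rb_fallback' only when no specific range matches, so a
 * fallback registration acts as a lower-priority, catch-all handler (for
 * example, a hypothetical range covering otherwise-unclaimed guest-physical
 * space).  Fallback entries are never cached in 'mmio_hint'.
 */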
int
unregister_mem(struct mem_range *memp)
{
	struct mem_range *mr;
	struct mmio_rb_range *entry = NULL;
	int err, perror, i;

	pthread_rwlock_wrlock(&mmio_rwlock);
	err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry);
	if (err == 0) {
		mr = &entry->mr_param;
		assert(mr->name == memp->name);
		assert(mr->base == memp->base && mr->size == memp->size);
		assert((mr->flags & MEM_F_IMMUTABLE) == 0);
		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);

		/* flush per-vCPU cache */
		for (i = 0; i < VM_MAXCPU; i++) {
			if (mmio_hint[i] == entry)
				mmio_hint[i] = NULL;
		}
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);

	if (entry)
		free(entry);

	return (err);
}

void
init_mem(void)
{

	RB_INIT(&mmio_rb_root);
	RB_INIT(&mmio_rb_fallback);
	pthread_rwlock_init(&mmio_rwlock, NULL);
}
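/*
 * Usage sketch (not compiled): a minimal device model registering an MMIO
 * range with this module.  'dummy_dev_handler', 'dummy_dev_attach', and the
 * base address and size are hypothetical; the general shape follows how
 * device models fill in a struct mem_range and call register_mem().
 */
#if 0
static int
dummy_dev_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
    int size, uint64_t *val, void *arg1, long arg2)
{
	if (dir == MEM_F_READ)
		*val = 0;	/* device registers read as zero */

	/* MEM_F_WRITE accesses are silently dropped in this sketch. */
	return (0);
}

static void
dummy_dev_attach(void)
{
	struct mem_range mr;
	int error;

	mr.name = "dummy-dev";
	mr.base = 0xd0000000;	/* hypothetical guest-physical base */
	mr.size = 0x1000;	/* one page of registers */
	mr.flags = MEM_F_RW;
	mr.handler = dummy_dev_handler;
	mr.arg1 = NULL;
	mr.arg2 = 0;

	error = register_mem(&mr);
	assert(error == 0);
}
#endif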