/*
 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine-dependent backends for ACPI P-state (CPU frequency) control.
 *
 * Three backends are provided, selected by CPU vendor/family in
 * acpi_pst_md_probe():
 *   - acpi_pst_amd10: AMD Family >= 10h, MSR-based P-state control
 *   - acpi_pst_amd0f: AMD Family 0fh, FID/VID transitions via the
 *     amd0f_set_fidvid()/amd0f_get_fidvid() helpers
 *   - acpi_pst_intel: Intel Enhanced SpeedStep (EST), either via
 *     IA32_PERF_CTL/IA32_PERF_STATUS MSRs or via I/O-port mapped
 *     control/status registers described by ACPI GAS entries
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/globaldata.h>

#include <machine/md_var.h>
#include <machine/cpufunc.h>
#include <machine/cpufreq.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include "acpi.h"
#include "acpi_cpu_pstate.h"

/*
 * CPUID 0x80000007 (APMI) EDX bit for hardware P-state control.
 * NOTE(review): acpi_pst_amd_probe() below tests the literal 0x80
 * instead of using this define -- presumably the same bit; confirm
 * and consolidate.
 */
#define AMD_APMI_HWPSTATE		0x80

/* Low 3 bits of _PSS control/status values select the P-state index. */
#define AMD_MSR_PSTATE_CSR_MASK		0x7ULL
/* AMD Family >= 10h P-state control/status MSRs. */
#define AMD1X_MSR_PSTATE_CTL		0xc0010062
#define AMD1X_MSR_PSTATE_ST		0xc0010063

/* "P-state enabled" bit (bit 63) in each per-P-state definition MSR. */
#define AMD_MSR_PSTATE_EN		0x8000000000000000ULL

/* Family 10h P-state definition MSRs: P0-P4 at 0xc0010064..0xc0010068. */
#define AMD10_MSR_PSTATE_START		0xc0010064
#define AMD10_MSR_PSTATE_COUNT		5

/* Field extractors for AMD Family 0fh _PSS control values. */
#define AMD0F_PST_CTL_FID(cval)		(((cval) >> 0)  & 0x3f)	/* frequency ID */
#define AMD0F_PST_CTL_VID(cval)		(((cval) >> 6)  & 0x1f)	/* voltage ID */
#define AMD0F_PST_CTL_VST(cval)		(((cval) >> 11) & 0x7f)	/* voltage stab. time */
#define AMD0F_PST_CTL_MVS(cval)		(((cval) >> 18) & 0x3)	/* max voltage step */
#define AMD0F_PST_CTL_PLLTIME(cval)	(((cval) >> 20) & 0x7f)	/* PLL lock time */
#define AMD0F_PST_CTL_RVO(cval)		(((cval) >> 28) & 0x3)	/* ramp voltage offset */
#define AMD0F_PST_CTL_IRT(cval)		(((cval) >> 30) & 0x3)	/* isoch. relief time */

/* Field extractors for AMD Family 0fh _PSS status values. */
#define AMD0F_PST_ST_FID(sval)		(((sval) >> 0) & 0x3f)
#define AMD0F_PST_ST_VID(sval)		(((sval) >> 6) & 0x3f)

/* Intel IA32_MISC_ENABLE MSR and its EST enable bit (bit 16). */
#define INTEL_MSR_MISC_ENABLE		0x1a0
#define INTEL_MSR_MISC_EST_EN		0x10000ULL

/* Intel IA32_PERF_STATUS/IA32_PERF_CTL MSRs; low 16 bits carry the state. */
#define INTEL_MSR_PERF_STATUS		0x198
#define INTEL_MSR_PERF_CTL		0x199
#define INTEL_MSR_PERF_MASK		0xffffULL

static const struct acpi_pst_md *
		acpi_pst_amd_probe(void);
static int	acpi_pst_amd_check_csr(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_amd1x_check_pstates(const struct acpi_pstate *, int,
		    uint32_t, uint32_t);
static int	acpi_pst_amd10_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_amd0f_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_amd_init(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static int	acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static const struct acpi_pstate *
		acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);
static const struct acpi_pstate *
		acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);

static const struct acpi_pst_md *
		acpi_pst_intel_probe(void);
static int	acpi_pst_intel_check_csr(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_intel_check_pstates(const struct acpi_pstate *, int);
static int	acpi_pst_intel_init(const struct acpi_pst_res *,
		    const struct acpi_pst_res *);
static int	acpi_pst_intel_set_pstate(const struct acpi_pst_res *,
		    const struct acpi_pst_res *, const struct acpi_pstate *);
static const struct acpi_pstate *
		acpi_pst_intel_get_pstate(const struct acpi_pst_res *,
		    const struct acpi_pstate *, int);

static int	acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *);
static int	acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *);
static uint32_t	acpi_pst_md_res_read(const struct acpi_pst_res *);
static void	acpi_pst_md_res_write(const struct acpi_pst_res *, uint32_t);

/* Method table for AMD Family >= 10h (MSR-based P-state control). */
static const struct acpi_pst_md	acpi_pst_amd10 = {
	.pmd_check_csr		= acpi_pst_amd_check_csr,
	.pmd_check_pstates	= acpi_pst_amd10_check_pstates,
	.pmd_init		= acpi_pst_amd_init,
	.pmd_set_pstate		= acpi_pst_amd1x_set_pstate,
	.pmd_get_pstate		= acpi_pst_amd1x_get_pstate
};

/* Method table for AMD Family 0fh (FID/VID-based P-state control). */
static const struct acpi_pst_md	acpi_pst_amd0f = {
	.pmd_check_csr		= acpi_pst_amd_check_csr,
	.pmd_check_pstates	= acpi_pst_amd0f_check_pstates,
	.pmd_init		= acpi_pst_amd_init,
	.pmd_set_pstate		= acpi_pst_amd0f_set_pstate,
	.pmd_get_pstate		= acpi_pst_amd0f_get_pstate
};

/* Method table for Intel EST (MSR- or I/O-port-based control). */
static const struct acpi_pst_md	acpi_pst_intel = {
	.pmd_check_csr		= acpi_pst_intel_check_csr,
	.pmd_check_pstates	= acpi_pst_intel_check_pstates,
	.pmd_init		= acpi_pst_intel_init,
	.pmd_set_pstate		= acpi_pst_intel_set_pstate,
	.pmd_get_pstate		= acpi_pst_intel_get_pstate
};

/*
 * When non-zero, P-state validation failures are fatal (EINVAL/EOPNOTSUPP);
 * when zero, mismatches are only logged.
 *
 * NOTE(review): the tunable name misspells "stringent" as "strigent".
 * It is kept as-is here since renaming it would silently break existing
 * loader.conf settings -- confirm before fixing.
 */
static int acpi_pst_stringent_check = 1;
TUNABLE_INT("hw.acpi.cpu.pstate.strigent_check", &acpi_pst_stringent_check);

/*
 * Select the machine-dependent P-state backend for the boot CPU's vendor.
 * Returns NULL if the vendor is unsupported or the CPU lacks the needed
 * feature.
 */
const struct acpi_pst_md *
acpi_pst_md_probe(void)
{
	if (cpu_vendor_id == CPU_VENDOR_AMD)
		return acpi_pst_amd_probe();
	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
		return acpi_pst_intel_probe();
	return NULL;
}

/*
 * Probe AMD P-state support via the APMI leaf (CPUID 0x80000007) and
 * pick the family-appropriate method table.
 */
static const struct acpi_pst_md *
acpi_pst_amd_probe(void)
{
	uint32_t regs[4];

	/* Only Family >= 0fh has P-State support */
	if (CPUID_TO_FAMILY(cpu_id) < 0xf)
		return NULL;

	/* Check whether APMI exists */
	if (cpu_exthigh < 0x80000007)
		return NULL;

	/* Fetch APMI */
	do_cpuid(0x80000007, regs);

	if (CPUID_TO_FAMILY(cpu_id) == 0xf) {		/* Family 0fh */
		/* Need both FID and VID control (EDX bits 1 and 2). */
		if ((regs[3] & 0x06) == 0x06)
			return &acpi_pst_amd0f;
	} else if (CPUID_TO_FAMILY(cpu_id) >= 0x10) {	/* Family >= 10h */
		/* Hardware P-state control (EDX bit 7). */
		if (regs[3] & 0x80)
			return &acpi_pst_amd10;
	}
	return NULL;
}

/*
 * AMD P-state control/status must be fixed-hardware (MSR) registers;
 * reject anything else advertised by the ACPI tables.
 */
static int
acpi_pst_amd_check_csr(const struct acpi_pst_res *ctrl,
		       const struct acpi_pst_res *status)
{
	if (ctrl->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kprintf("cpu%d: Invalid P-State control register\n", mycpuid);
		return EINVAL;
	}
	if (status->pr_gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kprintf("cpu%d: Invalid P-State status register\n", mycpuid);
		return EINVAL;
	}
	return 0;
}

/*
 * Verify that every _PSS control value maps to an existing P-state
 * definition MSR in [msr_start, msr_end) and that each such MSR has
 * its enable bit set.
 */
static int
acpi_pst_amd1x_check_pstates(const struct acpi_pstate *pstates, int npstates,
			     uint32_t msr_start, uint32_t msr_end)
{
	int i;

	/*
	 * Make sure that related MSR P-State registers are enabled.
	 *
	 * NOTE:
	 * We don't check status register value here;
	 * it will not be used.
	 */
	for (i = 0; i < npstates; ++i) {
		uint64_t pstate;
		uint32_t msr;

		msr = msr_start +
		      (pstates[i].st_cval & AMD_MSR_PSTATE_CSR_MASK);
		if (msr >= msr_end) {
			kprintf("cpu%d: MSR P-State register %#08x "
				"does not exist\n", mycpuid, msr);
			return EINVAL;
		}

		pstate = rdmsr(msr);
		if ((pstate & AMD_MSR_PSTATE_EN) == 0) {
			kprintf("cpu%d: MSR P-State register %#08x "
				"is not enabled\n", mycpuid, msr);
			return EINVAL;
		}
	}
	return 0;
}

/*
 * Family 10h wrapper: the MSR window only covers P0-P4, then defer to
 * the generic Family >= 10h check.
 */
static int
acpi_pst_amd10_check_pstates(const struct acpi_pstate *pstates, int npstates)
{
	/* Only P0-P4 are supported */
	if (npstates > AMD10_MSR_PSTATE_COUNT) {
		kprintf("cpu%d: only P0-P4 is allowed\n", mycpuid);
		return EINVAL;
	}

	return acpi_pst_amd1x_check_pstates(pstates, npstates,
			AMD10_MSR_PSTATE_START,
			AMD10_MSR_PSTATE_START + AMD10_MSR_PSTATE_COUNT);
}

/*
 * Family >= 10h: commit a P-state by writing its index (low 3 bits of
 * the _PSS control value) to the P-state control MSR.
 */
static int
acpi_pst_amd1x_set_pstate(const struct acpi_pst_res *ctrl __unused,
			  const struct acpi_pst_res *status __unused,
			  const struct acpi_pstate *pstate)
{
	uint64_t cval;

	cval = pstate->st_cval & AMD_MSR_PSTATE_CSR_MASK;
	wrmsr(AMD1X_MSR_PSTATE_CTL, cval);

	/*
	 * Don't check AMD1X_MSR_PSTATE_ST here, since it is
	 * affected by various P-State limits.
	 *
	 * For details:
	 * AMD Family 10h Processor BKDG Rev 3.20 (#31116)
	 * 2.4.2.4 P-state Transition Behavior
	 */

	return 0;
}

/*
 * Family >= 10h: read the current P-state index from the status MSR
 * and match it against the _PSS status values.  Returns NULL if no
 * entry matches.
 */
static const struct acpi_pstate *
acpi_pst_amd1x_get_pstate(const struct acpi_pst_res *status __unused,
			  const struct acpi_pstate *pstates, int npstates)
{
	uint64_t sval;
	int i;

	sval = rdmsr(AMD1X_MSR_PSTATE_ST) & AMD_MSR_PSTATE_CSR_MASK;
	for (i = 0; i < npstates; ++i) {
		if ((pstates[i].st_sval & AMD_MSR_PSTATE_CSR_MASK) == sval)
			return &pstates[i];
	}
	return NULL;
}

/*
 * Family 0fh: sanity-check the _PSS table against the hardware FID/VID
 * limits.  The first entry must match the max FID/VID, the last the min,
 * and intermediate entries must fall inside the limits.  Hard failures
 * (EINVAL/EOPNOTSUPP) depend on acpi_pst_stringent_check except for the
 * MVS/RVO checks, which are always fatal.
 *
 * NOTE(review): for VID, a numerically smaller value corresponds to the
 * max-performance state (fv_max.vid <= fv_min.vid) -- hence the inverted-
 * looking range comparisons; confirm against the Family 0fh BKDG.
 */
static int
acpi_pst_amd0f_check_pstates(const struct acpi_pstate *pstates, int npstates)
{
	struct amd0f_fidvid fv_max, fv_min;
	int i;

	amd0f_fidvid_limit(&fv_min, &fv_max);

	if (fv_min.fid == fv_max.fid && fv_min.vid == fv_max.vid) {
		kprintf("cpu%d: only one P-State is supported\n", mycpuid);
		if (acpi_pst_stringent_check)
			return EOPNOTSUPP;
	}

	for (i = 0; i < npstates; ++i) {
		const struct acpi_pstate *p = &pstates[i];
		uint32_t fid, vid, mvs, rvo;
		int mvs_mv, rvo_mv;

		fid = AMD0F_PST_CTL_FID(p->st_cval);
		vid = AMD0F_PST_CTL_VID(p->st_cval);

		if (i == 0) {
			/* Highest-performance entry: must match the limits. */
			if (vid != fv_max.vid) {
				kprintf("cpu%d: max VID mismatch "
					"real %u, lim %d\n", mycpuid,
					vid, fv_max.vid);
			}
			if (fid != fv_max.fid) {
				kprintf("cpu%d: max FID mismatch "
					"real %u, lim %d\n", mycpuid,
					fid, fv_max.fid);
			}
		} else if (i == npstates - 1) {
			/* Lowest-performance entry: must match the limits. */
			if (vid != fv_min.vid) {
				kprintf("cpu%d: min VID mismatch "
					"real %u, lim %d\n", mycpuid,
					vid, fv_min.vid);
			}
			if (fid != fv_min.fid) {
				kprintf("cpu%d: min FID mismatch "
					"real %u, lim %d\n", mycpuid,
					fid, fv_min.fid);
			}
		} else {
			/* Intermediate entries must fall inside the limits. */
			if (fid >= fv_max.fid || fid < (fv_min.fid + 0x8)) {
				kprintf("cpu%d: Invalid FID %#x, "
					"out [%#x, %#x]\n", mycpuid, fid,
					fv_min.fid + 0x8, fv_max.fid);
				if (acpi_pst_stringent_check)
					return EINVAL;
			}
			if (vid < fv_max.vid || vid > fv_min.vid) {
				kprintf("cpu%d: Invalid VID %#x, "
					"in [%#x, %#x]\n", mycpuid, vid,
					fv_max.vid, fv_min.vid);
				if (acpi_pst_stringent_check)
					return EINVAL;
			}
		}

		mvs = AMD0F_PST_CTL_MVS(p->st_cval);
		rvo = AMD0F_PST_CTL_RVO(p->st_cval);

		/* Only 0 is allowed, i.e. 25mV stepping */
		if (mvs != 0) {
			kprintf("cpu%d: Invalid MVS %#x\n", mycpuid, mvs);
			return EINVAL;
		}

		/* -> mV */
		mvs_mv = 25 * (1 << mvs);
		rvo_mv = 25 * rvo;
		/* RVO must be an integral number of MVS voltage steps. */
		if (rvo_mv % mvs_mv != 0) {
			kprintf("cpu%d: Invalid MVS/RVO (%#x/%#x)\n",
				mycpuid, mvs, rvo);
			return EINVAL;
		}
	}
	return 0;
}

/*
 * Family 0fh: unpack the _PSS control value into FID/VID plus the
 * transition timing parameters, then hand the transition off to the
 * amd0f_set_fidvid() helper.
 */
static int
acpi_pst_amd0f_set_pstate(const struct acpi_pst_res *ctrl __unused,
			  const struct acpi_pst_res *status __unused,
			  const struct acpi_pstate *pstate)
{
	struct amd0f_fidvid fv;
	struct amd0f_xsit xsit;

	fv.fid = AMD0F_PST_CTL_FID(pstate->st_cval);
	fv.vid = AMD0F_PST_CTL_VID(pstate->st_cval);

	xsit.rvo = AMD0F_PST_CTL_RVO(pstate->st_cval);
	xsit.mvs = AMD0F_PST_CTL_MVS(pstate->st_cval);
	xsit.vst = AMD0F_PST_CTL_VST(pstate->st_cval);
	xsit.pll_time = AMD0F_PST_CTL_PLLTIME(pstate->st_cval);
	xsit.irt = AMD0F_PST_CTL_IRT(pstate->st_cval);

	return amd0f_set_fidvid(&fv, &xsit);
}

/*
 * Family 0fh: read the current FID/VID from hardware and match it
 * against the _PSS status values.  Returns NULL on read failure or if
 * no entry matches.
 */
static const struct acpi_pstate *
acpi_pst_amd0f_get_pstate(const struct acpi_pst_res *status __unused,
			  const struct acpi_pstate *pstates, int npstates)
{
	struct amd0f_fidvid fv;
	int error, i;

	error = amd0f_get_fidvid(&fv);
	if (error)
		return NULL;

	for (i = 0; i < npstates; ++i) {
		const struct acpi_pstate *p = &pstates[i];

		if (fv.fid == AMD0F_PST_ST_FID(p->st_sval) &&
		    fv.vid == AMD0F_PST_ST_VID(p->st_sval))
			return p;
	}
	return NULL;
}

/* AMD backends need no per-CPU initialization. */
static int
acpi_pst_amd_init(const struct acpi_pst_res *ctrl __unused,
		  const struct acpi_pst_res *status __unused)
{
	return 0;
}

/*
 * Probe Intel Enhanced SpeedStep: requires the EST CPUID feature and
 * Family 0fh+ (NetBurst) or Family 6 cores.
 */
static const struct acpi_pst_md *
acpi_pst_intel_probe(void)
{
	if ((cpu_feature2 & CPUID2_EST) == 0)
		return NULL;

	if (CPUID_TO_FAMILY(cpu_id) >= 0xf || CPUID_TO_FAMILY(cpu_id) == 0x6)
		return &acpi_pst_intel;

	return NULL;
}

/*
 * Validate the Intel control/status register pair: both must live in the
 * same address space; fixed-hardware (MSR) entries must not have bus
 * resources allocated, while system-I/O entries must have usable ioport
 * resources with sane GAS geometry.
 *
 * NOTE(review): the "different SpaceId" kprintf below lacks a trailing
 * '\n' -- left untouched here since this edit changes comments only.
 */
static int
acpi_pst_intel_check_csr(const struct acpi_pst_res *ctrl,
			 const struct acpi_pst_res *status)
{
	int error;

	if (ctrl->pr_gas.SpaceId != status->pr_gas.SpaceId) {
		kprintf("cpu%d: P-State control(%d)/status(%d) registers have "
			"different SpaceId", mycpuid,
			ctrl->pr_gas.SpaceId, status->pr_gas.SpaceId);
		return EINVAL;
	}

	switch (ctrl->pr_gas.SpaceId) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		if (ctrl->pr_res != NULL || status->pr_res != NULL) {
			/* XXX should panic() */
			kprintf("cpu%d: Allocated resource for fixed hardware "
				"registers\n", mycpuid);
			return EINVAL;
		}
		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (ctrl->pr_res == NULL) {
			kprintf("cpu%d: ioport allocation failed for control "
				"register\n", mycpuid);
			return ENXIO;
		}
		error = acpi_pst_md_gas_verify(&ctrl->pr_gas);
		if (error) {
			kprintf("cpu%d: Invalid control register GAS\n",
				mycpuid);
			return error;
		}

		if (status->pr_res == NULL) {
			kprintf("cpu%d: ioport allocation failed for status "
				"register\n", mycpuid);
			return ENXIO;
		}
		error = acpi_pst_md_gas_verify(&status->pr_gas);
		if (error) {
			kprintf("cpu%d: Invalid status register GAS\n",
				mycpuid);
			return error;
		}
		break;

	default:
		kprintf("cpu%d: Invalid P-State control/status register "
			"SpaceId %d\n", mycpuid, ctrl->pr_gas.SpaceId);
		return EOPNOTSUPP;
	}
	return 0;
}

/* Intel: no additional _PSS validation beyond the generic checks. */
static int
acpi_pst_intel_check_pstates(const struct acpi_pstate *pstates __unused,
			     int npstates __unused)
{
	return 0;
}

/*
 * Per-CPU Intel init: make sure EST is enabled in IA32_MISC_ENABLE.
 * On NetBurst (family 0fh) and pre-Pentium-M family 6 models the EST
 * enable bit is reserved, so it is neither read nor written there.
 */
static int
acpi_pst_intel_init(const struct acpi_pst_res *ctrl __unused,
		    const struct acpi_pst_res *status __unused)
{
	uint64_t misc_enable;

	if (CPUID_TO_FAMILY(cpu_id) == 0xf ||
	    (CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) < 0xd)) {
		/* EST enable bit is reserved in INTEL_MSR_MISC_ENABLE */
		return 0;
	}

	misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
	if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
		misc_enable |= INTEL_MSR_MISC_EST_EN;
		wrmsr(INTEL_MSR_MISC_ENABLE, misc_enable);

		/* Read back to verify the bit actually stuck. */
		misc_enable = rdmsr(INTEL_MSR_MISC_ENABLE);
		if ((misc_enable & INTEL_MSR_MISC_EST_EN) == 0) {
			kprintf("cpu%d: Can't enable EST\n", mycpuid);
			return EIO;
		}
	}
	return 0;
}

/*
 * Intel: commit a P-state either through the I/O-port control register
 * (when one was allocated) or by a read-modify-write of the low 16 bits
 * of IA32_PERF_CTL.
 */
static int
acpi_pst_intel_set_pstate(const struct acpi_pst_res *ctrl,
			  const struct acpi_pst_res *status __unused,
			  const struct acpi_pstate *pstate)
{
	if (ctrl->pr_res != NULL) {
		acpi_pst_md_res_write(ctrl, pstate->st_cval);
	} else {
		uint64_t ctl;

		ctl = rdmsr(INTEL_MSR_PERF_CTL);
		ctl &= ~INTEL_MSR_PERF_MASK;
		ctl |= (pstate->st_cval & INTEL_MSR_PERF_MASK);
		wrmsr(INTEL_MSR_PERF_CTL, ctl);
	}
	return 0;
}

/*
 * Intel: read the current P-state from the I/O-port status register or
 * from IA32_PERF_STATUS and match it against the _PSS status values.
 * Returns NULL if no entry matches.
 */
static const struct acpi_pstate *
acpi_pst_intel_get_pstate(const struct acpi_pst_res *status,
			  const struct acpi_pstate *pstates, int npstates)
{
	int i;

	if (status->pr_res != NULL) {
		uint32_t st;

		st = acpi_pst_md_res_read(status);
		for (i = 0; i < npstates; ++i) {
			if (pstates[i].st_sval == st)
				return &pstates[i];
		}
	} else {
		uint64_t sval;

		sval = rdmsr(INTEL_MSR_PERF_STATUS) & INTEL_MSR_PERF_MASK;
		for (i = 0; i < npstates; ++i) {
			if ((pstates[i].st_sval & INTEL_MSR_PERF_MASK) == sval)
				return &pstates[i];
		}
	}
	return NULL;
}

/*
 * Compute the access size in bytes for a GAS: AccessWidth if specified,
 * otherwise derived from BitWidth.  Returns 0 for anything other than
 * 1/2/4-byte accesses.
 */
static int
acpi_pst_md_gas_asz(const ACPI_GENERIC_ADDRESS *gas)
{
	int asz;

	if (gas->AccessWidth != 0)
		asz = gas->AccessWidth;
	else
		asz = gas->BitWidth / NBBY;
	switch (asz) {
	case 1:
	case 2:
	case 4:
		break;
	default:
		asz = 0;
		break;
	}
	return asz;
}

/*
 * Validate GAS geometry for I/O-port access: byte-aligned offset, and
 * a 1/2/4-byte access window that fits inside the register's BitWidth.
 */
static int
acpi_pst_md_gas_verify(const ACPI_GENERIC_ADDRESS *gas)
{
	int reg, end, asz;

	if (gas->BitOffset % NBBY != 0)
		return EINVAL;

	end = gas->BitWidth / NBBY;
	reg = gas->BitOffset / NBBY;

	if (reg >= end)
		return EINVAL;

	asz = acpi_pst_md_gas_asz(gas);
	if (asz == 0)
		return EINVAL;

	if (reg + asz > end)
		return EINVAL;
	return 0;
}

/*
 * Read an I/O-port mapped P-state register through bus_space, using the
 * access size derived from its GAS.  Caller must have verified the GAS
 * (panics on an unsupported access width).
 */
static uint32_t
acpi_pst_md_res_read(const struct acpi_pst_res *res)
{
	int asz, reg;

	KKASSERT(res->pr_res != NULL);
	asz = acpi_pst_md_gas_asz(&res->pr_gas);
	reg = res->pr_gas.BitOffset / NBBY;

	switch (asz) {
	case 1:
		return bus_space_read_1(res->pr_bt, res->pr_bh, reg);
	case 2:
		return bus_space_read_2(res->pr_bt, res->pr_bh, reg);
	case 4:
		return bus_space_read_4(res->pr_bt, res->pr_bh, reg);
	}
	panic("unsupported access width %d", asz);

	/* NEVER REACHED */
	return 0;
}

/*
 * Write an I/O-port mapped P-state register through bus_space, using
 * the access size derived from its GAS.  Caller must have verified the
 * GAS (panics on an unsupported access width).
 */
static void
acpi_pst_md_res_write(const struct acpi_pst_res *res, uint32_t val)
{
	int asz, reg;

	KKASSERT(res->pr_res != NULL);
	asz = acpi_pst_md_gas_asz(&res->pr_gas);
	reg = res->pr_gas.BitOffset / NBBY;

	switch (asz) {
	case 1:
		bus_space_write_1(res->pr_bt, res->pr_bh, reg, val);
		break;
	case 2:
		bus_space_write_2(res->pr_bt, res->pr_bh, reg, val);
		break;
	case 4:
		bus_space_write_4(res->pr_bt, res->pr_bh, reg, val);
		break;
	default:
		panic("unsupported access width %d", asz);
	}
}