/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/ddi.h>
#include <sys/async.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/vmem.h>
#include <sys/intr.h>
#include <sys/ivintr.h>
#include <sys/errno.h>
#include <sys/hypervisor_api.h>
#include <sys/hsvc.h>
#include <px_obj.h>
#include <sys/machsystm.h>
#include <sys/hotplug/pci/pcihp.h>
#include "px_lib4v.h"
#include "px_err.h"

/* mask for the ranges property in calculating the real PFN range */
uint_t px_ranges_phi_mask = ((1 << 28) - 1);

/*
 * Hypervisor VPCI services information for the px nexus driver.
 */
static uint64_t px_vpci_min_ver; /* Negotiated VPCI API minor version */
static uint_t px_vpci_users = 0; /* VPCI API users */

static hsvc_info_t px_hsvc = {
        HSVC_REV_1, NULL, HSVC_GROUP_VPCI, PX_VPCI_MAJOR_VER,
        PX_VPCI_MINOR_VER, "PX"
};

int
px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
{
        px_nexus_regspec_t *rp;
        uint_t reglen;
        int ret;

        uint64_t mjrnum;
        uint64_t mnrnum;

        DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);

        /*
         * Check the HV intr group API version.
         * This driver uses the old interrupt routines, which are supported
         * by old firmware in the CORE API group and by newer firmware in
         * the INTR API group. Support for these calls will be dropped
         * once the INTR API group major goes to 2.
         */
        if ((hsvc_version(HSVC_GROUP_INTR, &mjrnum, &mnrnum) == 0) &&
            (mjrnum > 1)) {
                cmn_err(CE_WARN, "px: unsupported intr api group: "
                    "maj:0x%lx, min:0x%lx", mjrnum, mnrnum);
                return (ENOTSUP);
        }

        ret = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "reg", (uchar_t **)&rp, &reglen);
        if (ret != DDI_PROP_SUCCESS) {
                DBG(DBG_ATTACH, dip, "px_lib_dev_init failed ret=%d\n", ret);
                return (DDI_FAILURE);
        }

        /*
         * Initialize the device handle. The device handle uniquely
         * identifies a SUN4V device. It consists of the lower 28 bits of
         * the hi-cell of the first entry of the SUN4V device's "reg"
         * property as defined by the SUN4V Bus Binding to Open Firmware.
         */
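        /*
         * Worked example (illustrative values only, not taken from any
         * real machine): if the first "reg" entry carried
         * phys_addr == 0x0000040500000000, then (phys_addr >> 32) is
         * 0x405 and, assuming DEVHDLE_MASK keeps the low 28 bits, the
         * resulting device handle is 0x405.
         */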
        *dev_hdl = (devhandle_t)((rp->phys_addr >> 32) & DEVHDLE_MASK);
        ddi_prop_free(rp);

        /*
         * hotplug implementation requires this property to be associated with
         * any indirect PCI config access services
         */
        (void) ddi_prop_update_int(makedevice(ddi_driver_major(dip),
            PCIHP_AP_MINOR_NUM(ddi_get_instance(dip), PCIHP_DEVCTL_MINOR)), dip,
            PCI_BUS_CONF_MAP_PROP, 1);

        DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

        /*
         * Negotiate the API version for VPCI hypervisor services.
         */
        if (px_vpci_users++)
                return (DDI_SUCCESS);

        if ((ret = hsvc_register(&px_hsvc, &px_vpci_min_ver)) != 0) {
                cmn_err(CE_WARN, "%s: cannot negotiate hypervisor services "
                    "group: 0x%lx major: 0x%lx minor: 0x%lx errno: %d\n",
                    px_hsvc.hsvc_modname, px_hsvc.hsvc_group,
                    px_hsvc.hsvc_major, px_hsvc.hsvc_minor, ret);

                return (DDI_FAILURE);
        }

        DBG(DBG_ATTACH, dip, "px_lib_dev_init: negotiated VPCI API version, "
            "major 0x%lx minor 0x%lx\n", px_hsvc.hsvc_major, px_vpci_min_ver);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_dev_fini(dev_info_t *dip)
{
        DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

        (void) ddi_prop_remove(makedevice(ddi_driver_major(dip),
            PCIHP_AP_MINOR_NUM(ddi_get_instance(dip), PCIHP_DEVCTL_MINOR)), dip,
            PCI_BUS_CONF_MAP_PROP);

        if (--px_vpci_users == 0)
                (void) hsvc_unregister(&px_hsvc);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
    sysino_t *sysino)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
            "devino 0x%x\n", dip, devino);

        if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
            devino, sysino)) != H_EOK) {
                DBG(DBG_LIB_INT, dip,
                    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
            *sysino);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
            dip, sysino);

        if ((ret = hvio_intr_getvalid(sysino,
            (int *)intr_valid_state)) != H_EOK) {
                DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
                    ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
            *intr_valid_state);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t intr_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
            "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);

        if ((ret = hvio_intr_setvalid(sysino, intr_valid_state)) != H_EOK) {
                DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
                    ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

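/*
 * Typical calling sequence (an illustrative sketch, not a fixed contract):
 * interrupt setup code first translates a device INO into a system
 * interrupt number, then marks it valid:
 *
 *	sysino_t sysino;
 *
 *	if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	if (px_lib_intr_setvalid(dip, sysino, INTR_VALID) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */
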
/*ARGSUSED*/
int
px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t *intr_state)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
            dip, sysino);

        if ((ret = hvio_intr_getstate(sysino, (int *)intr_state)) != H_EOK) {
                DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
                    ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
            *intr_state);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t intr_state)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
            "intr_state 0x%x\n", dip, sysino, intr_state);

        if ((ret = hvio_intr_setstate(sysino, intr_state)) != H_EOK) {
                DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
                    ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
            dip, sysino);

        if ((ret = hvio_intr_gettarget(sysino, cpuid)) != H_EOK) {
                DBG(DBG_LIB_INT, dip,
                    "hvio_intr_gettarget failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
            "cpuid 0x%x\n", dip, sysino, cpuid);

        ret = hvio_intr_settarget(sysino, cpuid);
        if (ret == H_ECPUERROR) {
                cmn_err(CE_PANIC,
                    "px_lib_intr_settarget: hvio_intr_settarget failed, "
                    "ret = 0x%lx, cpuid = 0x%x, sysino = 0x%lx\n", ret,
                    cpuid, sysino);
        } else if (ret != H_EOK) {
                DBG(DBG_LIB_INT, dip,
                    "hvio_intr_settarget failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_reset(dev_info_t *dip)
{
        px_t *px_p = DIP_TO_STATE(dip);
        px_ib_t *ib_p = px_p->px_ib_p;
        px_ino_t *ino_p;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);

        mutex_enter(&ib_p->ib_ino_lst_mutex);

        /* Reset all Interrupts */
        for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next_p) {
                if (px_lib_intr_setstate(dip, ino_p->ino_sysino,
                    INTR_IDLE_STATE) != DDI_SUCCESS) {
                        /* Drop the ino list lock on the error path too. */
                        mutex_exit(&ib_p->ib_ino_lst_mutex);
                        return (BF_FATAL);
                }
        }

        mutex_exit(&ib_p->ib_ino_lst_mutex);

        return (BF_NONE);
}

/*ARGSUSED*/
int
px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
    io_attributes_t attr, void *addr, size_t pfn_index, int flags)
{
        tsbnum_t tsb_num = PCI_TSBID_TO_TSBNUM(tsbid);
        tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
        io_page_list_t *pfns, *pfn_p;
        pages_t ttes_mapped = 0;
        int i, err = DDI_SUCCESS;

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
            "pages 0x%x attr 0x%llx addr 0x%p pfn_index 0x%llx flags 0x%x\n",
            dip, tsbid, pages, attr, addr, pfn_index, flags);

        if ((pfns = pfn_p = kmem_zalloc((pages * sizeof (io_page_list_t)),
            KM_NOSLEEP)) == NULL) {
                DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: kmem_zalloc failed\n");
                return (DDI_FAILURE);
        }

        for (i = 0; i < pages; i++)
                pfns[i] = MMU_PTOB(PX_ADDR2PFN(addr, pfn_index, flags, i));

        /*
         * If the HV VPCI version is 1.1 or higher, pass the BDF, phantom
         * function, and relaxed ordering attributes. Otherwise, pass only
         * the read or write attribute.
         */
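        /*
         * Example (illustrative): with a hypothetical attr of
         * PCI_MAP_ATTR_WRITE plus a relaxed-ordering bit, a version 1.0
         * HV would only be handed PCI_MAP_ATTR_WRITE after the masking
         * below; the extra attribute bits are dropped rather than
         * rejected.
         */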
        if (px_vpci_min_ver == PX_VPCI_MINOR_VER_0)
                attr = attr & (PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE);

        while ((ttes_mapped = pfn_p - pfns) < pages) {
                uintptr_t ra = va_to_pa(pfn_p);
                pages_t ttes2map;
                uint64_t ret;

                ttes2map = (MMU_PAGE_SIZE - P2PHASE(ra, MMU_PAGE_SIZE)) >> 3;
                ra = MMU_PTOB(MMU_BTOP(ra));

                for (ttes2map = MIN(ttes2map, pages - ttes_mapped); ttes2map;
                    ttes2map -= ttes_mapped, pfn_p += ttes_mapped) {

                        ttes_mapped = 0;
                        if ((ret = hvio_iommu_map(DIP_TO_HANDLE(dip),
                            PCI_TSBID(tsb_num, tsb_index + (pfn_p - pfns)),
                            ttes2map, attr, (io_page_list_t *)(ra |
                            ((uintptr_t)pfn_p & MMU_PAGE_OFFSET)),
                            &ttes_mapped)) != H_EOK) {
                                DBG(DBG_LIB_DMA, dip, "hvio_iommu_map failed "
                                    "ret 0x%lx\n", ret);

                                ttes_mapped = pfn_p - pfns;
                                err = DDI_FAILURE;
                                goto cleanup;
                        }

                        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: tsb_num 0x%x "
                            "tsb_index 0x%lx ttes_to_map 0x%lx attr 0x%llx "
                            "ra 0x%p ttes_mapped 0x%x\n", tsb_num,
                            tsb_index + (pfn_p - pfns), ttes2map, attr,
                            ra | ((uintptr_t)pfn_p & MMU_PAGE_OFFSET),
                            ttes_mapped);
                }
        }

cleanup:
        if ((err == DDI_FAILURE) && ttes_mapped)
                (void) px_lib_iommu_demap(dip, tsbid, ttes_mapped);

        kmem_free(pfns, pages * sizeof (io_page_list_t));
        return (err);
}

/*ARGSUSED*/
int
px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
{
        tsbnum_t tsb_num = PCI_TSBID_TO_TSBNUM(tsbid);
        tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
        pages_t ttes2demap, ttes_demapped = 0;
        uint64_t ret;

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
            "pages 0x%x\n", dip, tsbid, pages);

        for (ttes2demap = pages; ttes2demap;
            ttes2demap -= ttes_demapped, tsb_index += ttes_demapped) {
                if ((ret = hvio_iommu_demap(DIP_TO_HANDLE(dip),
                    PCI_TSBID(tsb_num, tsb_index), ttes2demap,
                    &ttes_demapped)) != H_EOK) {
                        DBG(DBG_LIB_DMA, dip, "hvio_iommu_demap failed, "
                            "ret 0x%lx\n", ret);

                        return (DDI_FAILURE);
                }

                DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: tsb_num 0x%x "
                    "tsb_index 0x%lx ttes_to_demap 0x%lx ttes_demapped 0x%x\n",
                    tsb_num, tsb_index, ttes2demap, ttes_demapped);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
    r_addr_t *r_addr_p)
{
        uint64_t ret;

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
            dip, tsbid);

        if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), tsbid,
            attr_p, r_addr_p)) != H_EOK) {
                DBG(DBG_LIB_DMA, dip,
                    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);

                return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING : DDI_FAILURE);
        }

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%llx "
            "r_addr 0x%llx\n", *attr_p, *r_addr_p);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
uint64_t
px_get_rng_parent_hi_mask(px_t *px_p)
{
        return (PX_RANGE_PROP_MASK);
}

/*
 * Checks dma attributes against system bypass ranges.
 * A sun4v device must be capable of generating the entire 64-bit
 * address in order to perform bypass DMA.
 */

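/*
 * Example (illustrative): a conforming attribute set has
 * dma_attr_addr_lo == 0 and dma_attr_addr_hi == UINT64_MAX; anything
 * narrower (say, a 32-bit-only device with dma_attr_addr_hi ==
 * 0xffffffff) is rejected below with DDI_DMA_BADATTR rather than being
 * silently given a bypass window it cannot address.
 */
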
/*ARGSUSED*/
int
px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
    uint64_t *lo_p, uint64_t *hi_p)
{
        if ((attr_p->dma_attr_addr_lo != 0ull) ||
            (attr_p->dma_attr_addr_hi != UINT64_MAX)) {

                return (DDI_DMA_BADATTR);
        }

        *lo_p = 0ull;
        *hi_p = UINT64_MAX;

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
    io_addr_t *io_addr_p)
{
        uint64_t ret;

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
            "attr 0x%llx\n", dip, ra, attr);

        if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra,
            attr, io_addr_p)) != H_EOK) {
                DBG(DBG_LIB_DMA, dip,
                    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
                return (ret == H_ENOTSUPPORTED ? DDI_ENOTSUP : DDI_FAILURE);
        }

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
            *io_addr_p);

        return (DDI_SUCCESS);
}

/*
 * Returns any needed IO address bit(s) for relaxed ordering in IOMMU
 * bypass mode.
 */
/* ARGSUSED */
uint64_t
px_lib_ro_bypass(dev_info_t *dip, io_attributes_t attr, uint64_t ioaddr)
{
        return (ioaddr);
}

/*ARGSUSED*/
int
px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
        ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
        uint64_t sync_dir;
        size_t bytes_synced;
        int end, idx;
        off_t pg_off;
        devhandle_t hdl = DIP_TO_HANDLE(dip); /* need to cache hdl */

        DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
            "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
            dip, rdip, handle, off, len, cache_flags);

        if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
                cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
                    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);
                return (DDI_FAILURE);
        }

        if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
                return (DDI_SUCCESS);

        if (!len)
                len = mp->dmai_size;

        if (mp->dmai_rflags & DDI_DMA_READ)
                sync_dir = HVIO_DMA_SYNC_DIR_FROM_DEV;
        else
                sync_dir = HVIO_DMA_SYNC_DIR_TO_DEV;

        off += mp->dmai_offset;
        pg_off = off & MMU_PAGEOFFSET;

        DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: page offset %x size %x\n",
            pg_off, len);

        /* sync on page basis */
        end = MMU_BTOPR(off + len - 1);
        for (idx = MMU_BTOP(off); idx < end; idx++,
            len -= bytes_synced, pg_off = 0) {
                size_t bytes_to_sync = MIN(len, MMU_PAGESIZE - pg_off);

                if (hvio_dma_sync(hdl, MMU_PTOB(PX_GET_MP_PFN(mp, idx)) +
                    pg_off, bytes_to_sync, sync_dir, &bytes_synced) != H_EOK)
                        break;

                DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: Called hvio_dma_sync "
                    "ra = %p bytes to sync = %x bytes synced %x\n",
                    MMU_PTOB(PX_GET_MP_PFN(mp, idx)) + pg_off, bytes_to_sync,
                    bytes_synced);

                if (bytes_to_sync != bytes_synced)
                        break;
        }

        return (len ? DDI_FAILURE : DDI_SUCCESS);
}

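/*
 * Worked example (illustrative, assuming 8K MMU pages): a sync request
 * with off == 0x1ff0 and len == 0x30 crosses a page boundary, so the
 * loop above issues two hvio_dma_sync() calls: 0x10 bytes at page
 * offset 0x1ff0, then 0x20 bytes at offset 0 of the next page. len
 * only reaches zero if every call syncs exactly what was asked, which
 * is why the function returns DDI_FAILURE whenever len is nonzero.
 */
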
/*
 * MSIQ Functions:
 */

/*ARGSUSED*/
int
px_lib_msiq_init(dev_info_t *dip)
{
        px_t *px_p = DIP_TO_STATE(dip);
        px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
        r_addr_t ra;
        size_t msiq_size;
        uint_t rec_cnt;
        int i, err = DDI_SUCCESS;
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);

        msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

        /* sun4v requires all EQ allocation to be on q size boundary */
        if ((msiq_state_p->msiq_buf_p = contig_mem_alloc_align(
            msiq_state_p->msiq_cnt * msiq_size, msiq_size)) == NULL) {
                DBG(DBG_LIB_MSIQ, dip,
                    "px_lib_msiq_init: Contig alloc failed\n");

                return (DDI_FAILURE);
        }

        for (i = 0; i < msiq_state_p->msiq_cnt; i++) {
                msiq_state_p->msiq_p[i].msiq_base_p = (msiqhead_t *)
                    ((caddr_t)msiq_state_p->msiq_buf_p + (i * msiq_size));

                ra = (r_addr_t)va_to_pa((caddr_t)msiq_state_p->msiq_buf_p +
                    (i * msiq_size));

                if ((ret = hvio_msiq_conf(DIP_TO_HANDLE(dip),
                    (i + msiq_state_p->msiq_1st_msiq_id),
                    ra, msiq_state_p->msiq_rec_cnt)) != H_EOK) {
                        DBG(DBG_LIB_MSIQ, dip,
                            "hvio_msiq_conf failed, ret 0x%lx\n", ret);
                        err = DDI_FAILURE;
                        break;
                }

                if ((err = px_lib_msiq_info(dip,
                    (i + msiq_state_p->msiq_1st_msiq_id),
                    &ra, &rec_cnt)) != DDI_SUCCESS) {
                        DBG(DBG_LIB_MSIQ, dip,
                            "px_lib_msiq_info failed, ret 0x%x\n", err);
                        err = DDI_FAILURE;
                        break;
                }

                DBG(DBG_LIB_MSIQ, dip,
                    "px_lib_msiq_init: ra 0x%p rec_cnt 0x%x\n", ra, rec_cnt);
        }

        return (err);
}

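/*
 * Worked example (illustrative sizes): with msiq_rec_cnt == 128 and a
 * 64-byte msiq_rec_t, msiq_size is 8 KB, so each event queue above is
 * both 8 KB long and 8 KB aligned; with msiq_cnt == 36, one contiguous
 * 288 KB buffer backs all of the queues. The actual counts and record
 * size come from the platform; these numbers are only for the
 * arithmetic.
 */
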
/*ARGSUSED*/
int
px_lib_msiq_fini(dev_info_t *dip)
{
        px_t *px_p = DIP_TO_STATE(dip);
        px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
        size_t msiq_size;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
        msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

        if (msiq_state_p->msiq_buf_p != NULL)
                contig_mem_free(msiq_state_p->msiq_buf_p,
                    msiq_state_p->msiq_cnt * msiq_size);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
    uint_t *msiq_rec_cnt_p)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_info: dip 0x%p msiq_id 0x%x\n",
            dip, msiq_id);

        if ((ret = hvio_msiq_info(DIP_TO_HANDLE(dip),
            msiq_id, ra_p, msiq_rec_cnt_p)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_info failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSIQ, dip,
            "px_lib_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n",
            ra_p, *msiq_rec_cnt_p);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t *msiq_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
            dip, msiq_id);

        if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
            msiq_id, msiq_valid_state)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
            *msiq_valid_state);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t msiq_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
            "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);

        if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
            msiq_id, msiq_valid_state)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t *msiq_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
            dip, msiq_id);

        if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
            msiq_id, msiq_state)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
            *msiq_state);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t msiq_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
            "msiq_state 0x%x\n", dip, msiq_id, msiq_state);

        if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
            msiq_id, msiq_state)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t *msiq_head_p)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
            dip, msiq_id);

        if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
            msiq_id, msiq_head_p)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        /* Convert the HV byte offset into a record index. */
        *msiq_head_p = (*msiq_head_p / sizeof (msiq_rec_t));

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
            *msiq_head_p);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
            "msiq_head 0x%x\n", dip, msiq_id, msiq_head);

        /* Convert the record index back into a HV byte offset. */
        if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
            msiq_id, msiq_head * sizeof (msiq_rec_t))) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
    msiqtail_t *msiq_tail_p)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
            dip, msiq_id);

        if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
            msiq_id, msiq_tail_p)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        *msiq_tail_p = (*msiq_tail_p / sizeof (msiq_rec_t));
        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
            *msiq_tail_p);

        return (DDI_SUCCESS);
}

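/*
 * Worked example (illustrative): the hypervisor reports queue head and
 * tail as byte offsets into the EQ, while the common px code works in
 * record indices. Assuming a 64-byte msiq_rec_t, a raw head offset of
 * 0x180 becomes index 6 in px_lib_msiq_gethead(), and sethead converts
 * index 6 back to the 0x180 byte offset before calling the HV.
 */
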
/*ARGSUSED*/
void
px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p,
    msiq_rec_t *msiq_rec_p)
{
        msiq_rec_t *curr_msiq_rec_p = (msiq_rec_t *)msiq_head_p;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p\n", dip);

        if (!curr_msiq_rec_p->msiq_rec_type) {
                /* No record is pending; hand back a zeroed record type */
                msiq_rec_p->msiq_rec_type = 0;

                return;
        }

        *msiq_rec_p = *curr_msiq_rec_p;
}

/*ARGSUSED*/
void
px_lib_clr_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p)
{
        msiq_rec_t *curr_msiq_rec_p = (msiq_rec_t *)msiq_head_p;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_clr_msiq_rec: dip 0x%p\n", dip);

        /* Zero out msiq_rec_type field */
        curr_msiq_rec_p->msiq_rec_type = 0;
}

/*
 * MSI Functions:
 */

/*ARGSUSED*/
int
px_lib_msi_init(dev_info_t *dip)
{
        DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);

        /* Noop */
        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t *msiq_id)
{
        uint64_t ret;

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
            dip, msi_num);

        if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
            msi_num, msiq_id)) != H_EOK) {
                DBG(DBG_LIB_MSI, dip,
                    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
            *msiq_id);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t msiq_id, msi_type_t msitype)
{
        uint64_t ret;

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
            "msiq_id 0x%x\n", dip, msi_num, msiq_id);

        if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
            msi_num, msiq_id, msitype)) != H_EOK) {
                DBG(DBG_LIB_MSI, dip,
                    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

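/*
 * Typical MSI binding sequence (an illustrative sketch; the constant
 * names are assumptions, not a fixed contract): MSI setup code looks up
 * the EQ a vector is routed to, retargets it if needed, then enables it:
 *
 *	msiqid_t msiq_id;
 *
 *	if (px_lib_msi_getmsiq(dip, msi_num, &msiq_id) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	if (px_lib_msi_setmsiq(dip, msi_num, msiq_id, MSI32_TYPE) !=
 *	    DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	if (px_lib_msi_setvalid(dip, msi_num, PCI_MSI_VALID) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */
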
/*ARGSUSED*/
int
px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
            dip, msi_num);

        if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
            msi_num, msi_valid_state)) != H_EOK) {
                DBG(DBG_LIB_MSI, dip,
                    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
            *msi_valid_state);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t msi_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
            "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);

        if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
            msi_num, msi_valid_state)) != H_EOK) {
                DBG(DBG_LIB_MSI, dip,
                    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t *msi_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
            dip, msi_num);

        if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
            msi_num, msi_state)) != H_EOK) {
                DBG(DBG_LIB_MSI, dip,
                    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
            *msi_state);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t msi_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
            "msi_state 0x%x\n", dip, msi_num, msi_state);

        if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
            msi_num, msi_state)) != H_EOK) {
                DBG(DBG_LIB_MSI, dip,
                    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*
 * MSG Functions:
 */

/*ARGSUSED*/
int
px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t *msiq_id)
{
        uint64_t ret;

        DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
            dip, msg_type);

        if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
            msg_type, msiq_id)) != H_EOK) {
                DBG(DBG_LIB_MSG, dip,
                    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
            *msiq_id);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t msiq_id)
{
        uint64_t ret;

        DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
            "msiq_id 0x%x\n", dip, msg_type, msiq_id);

        if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
            msg_type, msiq_id)) != H_EOK) {
                DBG(DBG_LIB_MSG, dip,
                    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t *msg_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
            dip, msg_type);

        if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
            msg_valid_state)) != H_EOK) {
                DBG(DBG_LIB_MSG, dip,
                    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
            *msg_valid_state);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
            "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);

        if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
            msg_valid_state)) != H_EOK) {
                DBG(DBG_LIB_MSG, dip,
                    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*
 * Suspend/Resume Functions:
 * Currently unsupported by hypervisor and all functions are noops.
 */
/*ARGSUSED*/
int
px_lib_suspend(dev_info_t *dip)
{
        DBG(DBG_ATTACH, dip, "px_lib_suspend: Not supported\n");

        /* Not supported */
        return (DDI_FAILURE);
}

/*ARGSUSED*/
void
px_lib_resume(dev_info_t *dip)
{
        DBG(DBG_ATTACH, dip, "px_lib_resume: Not supported\n");

        /* Noop */
}

/*
 * Misc Functions:
 */
/*ARGSUSED*/
static int
px_lib_config_get(dev_info_t *dip, pci_device_t bdf, pci_config_offset_t off,
    uint8_t size, pci_cfg_data_t *data_p)
{
        uint64_t ret;

        DBG(DBG_LIB_CFG, dip, "px_lib_config_get: dip 0x%p, bdf 0x%llx "
            "off 0x%x size 0x%x\n", dip, bdf, off, size);

        if ((ret = hvio_config_get(DIP_TO_HANDLE(dip), bdf, off,
            size, data_p)) != H_EOK) {
                DBG(DBG_LIB_CFG, dip,
                    "hvio_config_get failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }
        DBG(DBG_LIB_CFG, dip, "px_lib_config_get: data 0x%x\n", data_p->dw);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
px_lib_config_put(dev_info_t *dip, pci_device_t bdf, pci_config_offset_t off,
    uint8_t size, pci_cfg_data_t data)
{
        uint64_t ret;

        DBG(DBG_LIB_CFG, dip, "px_lib_config_put: dip 0x%p, bdf 0x%llx "
            "off 0x%x size 0x%x data 0x%llx\n", dip, bdf, off, size, data.qw);

        if ((ret = hvio_config_put(DIP_TO_HANDLE(dip), bdf, off,
            size, data)) != H_EOK) {
                DBG(DBG_LIB_CFG, dip,
                    "hvio_config_put failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

static uint32_t
px_pci_config_get(ddi_acc_impl_t *handle, uint32_t *addr, int size)
{
        px_config_acc_pvt_t *px_pvt = (px_config_acc_pvt_t *)
            handle->ahi_common.ah_bus_private;
        uint32_t pci_dev_addr = px_pvt->raddr;
        uint32_t vaddr = px_pvt->vaddr;
        /* byte offset into the device's 4K config space */
        uint16_t off = (uint16_t)((uintptr_t)addr - vaddr) & 0xfff;
        uint64_t rdata = 0;

        if (px_lib_config_get(px_pvt->dip, pci_dev_addr, off,
            size, (pci_cfg_data_t *)&rdata) != DDI_SUCCESS)
                /* XXX update error kstats */
                return (0xffffffff);
        return ((uint32_t)rdata);
}

static void
px_pci_config_put(ddi_acc_impl_t *handle, uint32_t *addr,
    int size, pci_cfg_data_t wdata)
{
        px_config_acc_pvt_t *px_pvt = (px_config_acc_pvt_t *)
            handle->ahi_common.ah_bus_private;
        uint32_t pci_dev_addr = px_pvt->raddr;
        uint32_t vaddr = px_pvt->vaddr;
        /* byte offset into the device's 4K config space */
        uint16_t off = (uint16_t)((uintptr_t)addr - vaddr) & 0xfff;

        if (px_lib_config_put(px_pvt->dip, pci_dev_addr, off,
            size, wdata) != DDI_SUCCESS) {
                /*EMPTY*/
                /* XXX update error kstats */
        }
}

static uint8_t
px_pci_config_get8(ddi_acc_impl_t *handle, uint8_t *addr)
{
        return ((uint8_t)px_pci_config_get(handle, (uint32_t *)addr, 1));
}

static uint16_t
px_pci_config_get16(ddi_acc_impl_t *handle, uint16_t *addr)
{
        return ((uint16_t)px_pci_config_get(handle, (uint32_t *)addr, 2));
}

static uint32_t
px_pci_config_get32(ddi_acc_impl_t *handle, uint32_t *addr)
{
        return ((uint32_t)px_pci_config_get(handle, (uint32_t *)addr, 4));
}

static uint64_t
px_pci_config_get64(ddi_acc_impl_t *handle, uint64_t *addr)
{
        uint32_t rdatah, rdatal;

        rdatal = (uint32_t)px_pci_config_get(handle, (uint32_t *)addr, 4);
        rdatah = (uint32_t)px_pci_config_get(handle,
            (uint32_t *)((char *)addr+4), 4);
        return (((uint64_t)rdatah << 32) | rdatal);
}

static void
px_pci_config_put8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t data)
{
        pci_cfg_data_t wdata = { 0 };

        wdata.qw = (uint8_t)data;
        px_pci_config_put(handle, (uint32_t *)addr, 1, wdata);
}

static void
px_pci_config_put16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t data)
{
        pci_cfg_data_t wdata = { 0 };

        wdata.qw = (uint16_t)data;
        px_pci_config_put(handle, (uint32_t *)addr, 2, wdata);
}

static void
px_pci_config_put32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t data)
{
        pci_cfg_data_t wdata = { 0 };

        wdata.qw = (uint32_t)data;
        px_pci_config_put(handle, (uint32_t *)addr, 4, wdata);
}

static void
px_pci_config_put64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t data)
{
        pci_cfg_data_t wdata = { 0 };

        wdata.qw = (uint32_t)(data & 0xffffffff);
        px_pci_config_put(handle, (uint32_t *)addr, 4, wdata);
        wdata.qw = (uint32_t)((data >> 32) & 0xffffffff);
        px_pci_config_put(handle, (uint32_t *)((char *)addr+4), 4, wdata);
}

/*
 * Function to rep read 8 bit data off the PCI configuration space
 * via the hypervisor config access services.
 */
static void
px_pci_config_rep_get8(ddi_acc_impl_t *handle, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
        if (flags == DDI_DEV_AUTOINCR)
                for (; repcount; repcount--)
                        *host_addr++ = px_pci_config_get8(handle, dev_addr++);
        else
                for (; repcount; repcount--)
                        *host_addr++ = px_pci_config_get8(handle, dev_addr);
}

/*
 * Function to rep read 16 bit data off the PCI configuration space
 * via the hypervisor config access services.
 */
static void
px_pci_config_rep_get16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
        if (flags == DDI_DEV_AUTOINCR)
                for (; repcount; repcount--)
                        *host_addr++ = px_pci_config_get16(handle, dev_addr++);
        else
                for (; repcount; repcount--)
                        *host_addr++ = px_pci_config_get16(handle, dev_addr);
}

/*
 * Function to rep read 32 bit data off the PCI configuration space
 * via the hypervisor config access services.
 */
static void
px_pci_config_rep_get32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
        if (flags == DDI_DEV_AUTOINCR)
                for (; repcount; repcount--)
                        *host_addr++ = px_pci_config_get32(handle, dev_addr++);
        else
                for (; repcount; repcount--)
                        *host_addr++ = px_pci_config_get32(handle, dev_addr);
}

/*
 * Function to rep read 64 bit data off the PCI configuration space
 * via the hypervisor config access services.
 */
static void
px_pci_config_rep_get64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
        if (flags == DDI_DEV_AUTOINCR)
                for (; repcount; repcount--)
                        *host_addr++ = px_pci_config_get64(handle, dev_addr++);
        else
                for (; repcount; repcount--)
                        *host_addr++ = px_pci_config_get64(handle, dev_addr);
}

/*
 * Function to rep write 8 bit data into the PCI configuration space
 * via the hypervisor config access services.
 */
static void
px_pci_config_rep_put8(ddi_acc_impl_t *handle, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
        if (flags == DDI_DEV_AUTOINCR)
                for (; repcount; repcount--)
                        px_pci_config_put8(handle, dev_addr++, *host_addr++);
        else
                for (; repcount; repcount--)
                        px_pci_config_put8(handle, dev_addr, *host_addr++);
}

/*
 * Function to rep write 16 bit data into the PCI configuration space
 * via the hypervisor config access services.
 */
static void
px_pci_config_rep_put16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
        if (flags == DDI_DEV_AUTOINCR)
                for (; repcount; repcount--)
                        px_pci_config_put16(handle, dev_addr++, *host_addr++);
        else
                for (; repcount; repcount--)
                        px_pci_config_put16(handle, dev_addr, *host_addr++);
}

/*
 * Function to rep write 32 bit data into the PCI configuration space
 * via the hypervisor config access services.
 */
static void
px_pci_config_rep_put32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
        if (flags == DDI_DEV_AUTOINCR)
                for (; repcount; repcount--)
                        px_pci_config_put32(handle, dev_addr++, *host_addr++);
        else
                for (; repcount; repcount--)
                        px_pci_config_put32(handle, dev_addr, *host_addr++);
}

/*
 * Function to rep write 64 bit data into the PCI configuration space
 * via the hypervisor config access services.
 */
static void
px_pci_config_rep_put64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
        if (flags == DDI_DEV_AUTOINCR)
                for (; repcount; repcount--)
                        px_pci_config_put64(handle, dev_addr++, *host_addr++);
        else
                for (; repcount; repcount--)
                        px_pci_config_put64(handle, dev_addr, *host_addr++);
}

/*
 * Provide a private access handle to route config access calls to the
 * hypervisor. Beware: do all error checking for config space accesses
 * before calling this function, i.e. do error checking from the calling
 * function. Because DDI provides no meaningful error code for this path,
 * the guaranteed return of DDI_SUCCESS from here keeps the generic
 * caller's code simple.
 */
/*ARGSUSED*/
int
px_lib_map_vconfig(dev_info_t *dip,
    ddi_map_req_t *mp, pci_config_offset_t off,
    pci_regspec_t *rp, caddr_t *addrp)
{
        int fmcap;
        ndi_err_t *errp;
        on_trap_data_t *otp;
        ddi_acc_hdl_t *hp;
        ddi_acc_impl_t *ap;
        uchar_t busnum;         /* bus number */
        uchar_t devnum;         /* device number */
        uchar_t funcnum;        /* function number */
        px_config_acc_pvt_t *px_pvt;

        hp = (ddi_acc_hdl_t *)mp->map_handlep;
        ap = (ddi_acc_impl_t *)hp->ah_platform_private;

        /* Check for mapping teardown operation */
        if ((mp->map_op == DDI_MO_UNMAP) ||
            (mp->map_op == DDI_MO_UNLOCK)) {
                /* free up memory allocated for the private access handle. */
                px_pvt = (px_config_acc_pvt_t *)hp->ah_bus_private;
                kmem_free((void *)px_pvt, sizeof (px_config_acc_pvt_t));

                /* unmap operation of PCI IO/config space. */
                return (DDI_SUCCESS);
        }

        fmcap = ddi_fm_capable(dip);
        if (DDI_FM_ACC_ERR_CAP(fmcap)) {
                errp = ((ddi_acc_impl_t *)hp)->ahi_err;
                otp = (on_trap_data_t *)errp->err_ontrap;
                otp->ot_handle = (void *)(hp);
                otp->ot_prot = OT_DATA_ACCESS;
                errp->err_status = DDI_FM_OK;
                errp->err_expected = DDI_FM_ERR_UNEXPECTED;
                errp->err_cf = px_err_cfg_hdl_check;
        }

        ap->ahi_get8 = px_pci_config_get8;
        ap->ahi_get16 = px_pci_config_get16;
        ap->ahi_get32 = px_pci_config_get32;
        ap->ahi_get64 = px_pci_config_get64;
        ap->ahi_put8 = px_pci_config_put8;
        ap->ahi_put16 = px_pci_config_put16;
        ap->ahi_put32 = px_pci_config_put32;
        ap->ahi_put64 = px_pci_config_put64;
        ap->ahi_rep_get8 = px_pci_config_rep_get8;
        ap->ahi_rep_get16 = px_pci_config_rep_get16;
        ap->ahi_rep_get32 = px_pci_config_rep_get32;
        ap->ahi_rep_get64 = px_pci_config_rep_get64;
        ap->ahi_rep_put8 = px_pci_config_rep_put8;
        ap->ahi_rep_put16 = px_pci_config_rep_put16;
        ap->ahi_rep_put32 = px_pci_config_rep_put32;
        ap->ahi_rep_put64 = px_pci_config_rep_put64;

        /* Initialize to default check/notify functions */
        ap->ahi_fault = 0;
        ap->ahi_fault_check = i_ddi_acc_fault_check;
        ap->ahi_fault_notify = i_ddi_acc_fault_notify;

        /* allocate memory for our private handle */
        px_pvt = (px_config_acc_pvt_t *)
            kmem_zalloc(sizeof (px_config_acc_pvt_t), KM_SLEEP);
        hp->ah_bus_private = (void *)px_pvt;

        busnum = PCI_REG_BUS_G(rp->pci_phys_hi);
        devnum = PCI_REG_DEV_G(rp->pci_phys_hi);
        funcnum = PCI_REG_FUNC_G(rp->pci_phys_hi);

        /* set up private data for use during IO routines */

        /* addr needed by the HV APIs */
        px_pvt->raddr = busnum << 16 | devnum << 11 | funcnum << 8;
        /*
         * Address that specifies the actual offset into the 256MB
         * memory mapped configuration space, 4K per device.
         * The first 12 bits form the offset into the 4K config space.
         * This address is only used during the IO routines to calculate
         * the offset at which the transaction must be performed.
         * Drivers bypassing DDI functions to access PCI config space will
         * panic the system, since the following is a bogus virtual address.
         */
        px_pvt->vaddr = busnum << 20 | devnum << 15 | funcnum << 12 | off;
        px_pvt->dip = dip;

        DBG(DBG_LIB_CFG, dip, "px_config_setup: raddr 0x%x, vaddr 0x%x\n",
            px_pvt->raddr, px_pvt->vaddr);
        *addrp = (caddr_t)(uintptr_t)px_pvt->vaddr;
        return (DDI_SUCCESS);
}

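/*
 * Worked example (illustrative): for bus 2, device 1, function 0 and
 * off 0, the encodings above give
 *	raddr = 2 << 16 | 1 << 11 | 0 << 8  = 0x20800
 *	vaddr = 2 << 20 | 1 << 15 | 0 << 12 = 0x208000
 * A later access to vaddr + 0x10 is decoded by px_pci_config_get/put
 * as config offset 0x10 of device 2/1/0.
 */
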
/*ARGSUSED*/
void
px_lib_map_attr_check(ddi_map_req_t *mp)
{
}

/*
 * px_lib_log_safeacc_err:
 * Imitate a cpu/mem trap call when a peek/poke fails.
 * This will initiate something similar to px_fm_callback.
 */
static void
px_lib_log_safeacc_err(px_t *px_p, ddi_acc_handle_t handle, int fme_flag,
    r_addr_t addr)
{
        uint32_t addr_high, addr_low;
        pcie_req_id_t bdf = PCIE_INVALID_BDF;
        px_ranges_t *ranges_p;
        int range_len, i;
        ddi_acc_impl_t *hp = (ddi_acc_impl_t *)handle;
        ddi_fm_error_t derr;

        derr.fme_status = DDI_FM_NONFATAL;
        derr.fme_version = DDI_FME_VERSION;
        derr.fme_flag = fme_flag;
        derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
        derr.fme_acc_handle = handle;
        if (hp)
                hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

        addr_high = (uint32_t)(addr >> 32);
        addr_low = (uint32_t)addr;

        /*
         * Make sure this failed load came from this PCIe port. Check by
         * matching the upper 32 bits of the address with the ranges property.
         */
        range_len = px_p->px_ranges_length / sizeof (px_ranges_t);
        i = 0;
        for (ranges_p = px_p->px_ranges_p; i < range_len; i++, ranges_p++) {
                if (ranges_p->parent_high == addr_high) {
                        switch (ranges_p->child_high & PCI_ADDR_MASK) {
                        case PCI_ADDR_CONFIG:
                                bdf = (pcie_req_id_t)(addr_low >> 12);
                                break;
                        default:
                                bdf = PCIE_INVALID_BDF;
                                break;
                        }
                        break;
                }
        }

        px_rp_en_q(px_p, bdf, addr, NULL);

        if (px_fm_enter(px_p) == DDI_SUCCESS) {
                (void) px_scan_fabric(px_p, px_p->px_dip, &derr);
                px_fm_exit(px_p);
        }
}


#ifdef DEBUG
int px_peekfault_cnt = 0;
int px_pokefault_cnt = 0;
#endif /* DEBUG */

/*
 * Do a safe write to a device.
 *
 * When this function is given a handle (cautious access), all errors are
 * suppressed.
 *
 * When this function is not given a handle (poke), only Unsupported Request
 * and Completer Abort errors are suppressed.
 *
 * In all cases, all errors are returned in the function return status.
 */

int
px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
        px_t *px_p = DIP_TO_STATE(dip);
        px_pec_t *pec_p = px_p->px_pec_p;
        ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;

        size_t repcount = in_args->repcount;
        size_t size = in_args->size;
        uintptr_t dev_addr = in_args->dev_addr;
        uintptr_t host_addr = in_args->host_addr;

        int err = DDI_SUCCESS;
        uint64_t hvio_poke_status;
        uint32_t wrt_stat;

        r_addr_t ra;
        uint64_t pokeval;
        pcie_req_id_t bdf;

        /*
         * Look up the BDF once, before any locks are taken; it does not
         * change across iterations, and failing after the pokefault lock
         * was acquired would leak that lock.
         */
        if (pcie_get_bdf_from_dip(rdip, &bdf) != DDI_SUCCESS)
                return (DDI_FAILURE);

        ra = (r_addr_t)va_to_pa((void *)dev_addr);
        for (; repcount; repcount--) {

                switch (size) {
                case sizeof (uint8_t):
                        pokeval = *(uint8_t *)host_addr;
                        break;
                case sizeof (uint16_t):
                        pokeval = *(uint16_t *)host_addr;
                        break;
                case sizeof (uint32_t):
                        pokeval = *(uint32_t *)host_addr;
                        break;
                case sizeof (uint64_t):
                        pokeval = *(uint64_t *)host_addr;
                        break;
                default:
                        DBG(DBG_MAP, px_p->px_dip,
                            "poke: invalid size %d passed\n", size);
                        err = DDI_FAILURE;
                        goto done;
                }

                /*
                 * Grab pokefault mutex since hypervisor does not guarantee
                 * poke serialization.
                 */
                if (hp) {
                        i_ndi_busop_access_enter(hp->ahi_common.ah_dip,
                            (ddi_acc_handle_t)hp);
                        pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
                } else {
                        mutex_enter(&pec_p->pec_pokefault_mutex);
                        pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
                }

                hvio_poke_status = hvio_poke(px_p->px_dev_hdl, ra, size,
                    pokeval, bdf << 8, &wrt_stat);

                if ((hvio_poke_status != H_EOK) || (wrt_stat != H_EOK)) {
                        err = DDI_FAILURE;
#ifdef DEBUG
                        px_pokefault_cnt++;
#endif
                        /*
                         * For CAUTIOUS and POKE access, notify FMA to
                         * cleanup. Imitate a cpu/mem trap call like in sun4u.
                         */
                        px_lib_log_safeacc_err(px_p, (ddi_acc_handle_t)hp,
                            (hp ? DDI_FM_ERR_EXPECTED :
                            DDI_FM_ERR_POKE), ra);

                        pec_p->pec_ontrap_data = NULL;
                        pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
                        if (hp) {
                                i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
                                    (ddi_acc_handle_t)hp);
                        } else {
                                mutex_exit(&pec_p->pec_pokefault_mutex);
                        }
                        goto done;
                }

                pec_p->pec_ontrap_data = NULL;
                pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
                if (hp) {
                        i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
                            (ddi_acc_handle_t)hp);
                } else {
                        mutex_exit(&pec_p->pec_pokefault_mutex);
                }

                host_addr += size;

                if (in_args->flags == DDI_DEV_AUTOINCR) {
                        dev_addr += size;
                        ra = (r_addr_t)va_to_pa((void *)dev_addr);
                }
        }

done:
        return (err);
}

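/*
 * Illustrative caller's view (a sketch, with a hypothetical value): a
 * leaf driver never calls the routine above directly; it arrives here
 * via the DDI safe access entry points, e.g.
 *
 *	uint32_t val = 0xdeadbeef;
 *
 *	if (ddi_poke32(rdip, (int32_t *)dev_addr, (int32_t)val) !=
 *	    DDI_SUCCESS) {
 *		the write faulted and was logged via
 *		px_lib_log_safeacc_err()
 *	}
 */
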
/*ARGSUSED*/
int
px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args, void *result)
{
        px_t *px_p = DIP_TO_STATE(dip);
        px_pec_t *pec_p = px_p->px_pec_p;
        ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;

        size_t repcount = in_args->repcount;
        uintptr_t dev_addr = in_args->dev_addr;
        uintptr_t host_addr = in_args->host_addr;

        r_addr_t ra;
        uint32_t read_status;
        uint64_t hvio_peek_status;
        uint64_t peekval;
        int err = DDI_SUCCESS;

        result = (void *)in_args->host_addr;

        ra = (r_addr_t)va_to_pa((void *)dev_addr);
        for (; repcount; repcount--) {

                /* Lock pokefault mutex so read doesn't mask a poke fault. */
                if (hp) {
                        i_ndi_busop_access_enter(hp->ahi_common.ah_dip,
                            (ddi_acc_handle_t)hp);
                        pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
                } else {
                        mutex_enter(&pec_p->pec_pokefault_mutex);
                        pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
                }

                hvio_peek_status = hvio_peek(px_p->px_dev_hdl, ra,
                    in_args->size, &read_status, &peekval);

                if ((hvio_peek_status != H_EOK) || (read_status != H_EOK)) {
                        err = DDI_FAILURE;

                        /*
                         * For CAUTIOUS and PEEK access, notify FMA to
                         * cleanup. Imitate a cpu/mem trap call like in sun4u.
                         */
                        px_lib_log_safeacc_err(px_p, (ddi_acc_handle_t)hp,
                            (hp ? DDI_FM_ERR_EXPECTED :
                            DDI_FM_ERR_PEEK), ra);

                        /*
                         * Stuff FFs in host addr if peek; a faulted PCIe
                         * read is conventionally reported as all-ones.
                         */
                        if (hp == NULL) {
                                int i;
                                uint8_t *ff_addr = (uint8_t *)host_addr;
                                for (i = 0; i < in_args->size; i++)
                                        *ff_addr++ = 0xff;
                        }
#ifdef DEBUG
                        px_peekfault_cnt++;
#endif
                        pec_p->pec_ontrap_data = NULL;
                        pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
                        if (hp) {
                                i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
                                    (ddi_acc_handle_t)hp);
                        } else {
                                mutex_exit(&pec_p->pec_pokefault_mutex);
                        }
                        goto done;
                }

                pec_p->pec_ontrap_data = NULL;
                pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
                if (hp) {
                        i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
                            (ddi_acc_handle_t)hp);
                } else {
                        mutex_exit(&pec_p->pec_pokefault_mutex);
                }

                switch (in_args->size) {
                case sizeof (uint8_t):
                        *(uint8_t *)host_addr = (uint8_t)peekval;
                        break;
                case sizeof (uint16_t):
                        *(uint16_t *)host_addr = (uint16_t)peekval;
                        break;
                case sizeof (uint32_t):
                        *(uint32_t *)host_addr = (uint32_t)peekval;
                        break;
                case sizeof (uint64_t):
                        *(uint64_t *)host_addr = (uint64_t)peekval;
                        break;
                default:
                        DBG(DBG_MAP, px_p->px_dip,
                            "peek: invalid size %d passed\n",
                            in_args->size);
                        err = DDI_FAILURE;
                        goto done;
                }

                host_addr += in_args->size;

                if (in_args->flags == DDI_DEV_AUTOINCR) {
                        dev_addr += in_args->size;
                        ra = (r_addr_t)va_to_pa((void *)dev_addr);
                }
        }
done:
        return (err);
}

/* add interrupt vector */
int
px_err_add_intr(px_fault_t *px_fault_p)
{
        px_t *px_p = DIP_TO_STATE(px_fault_p->px_fh_dip);

        DBG(DBG_LIB_INT, px_p->px_dip,
            "px_err_add_intr: calling add_ivintr");

        VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
            (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL,
            (caddr_t)&px_fault_p->px_intr_payload[0]) == 0);

        DBG(DBG_LIB_INT, px_p->px_dip,
            "px_err_add_intr: ib_intr_enable");

        px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);

        return (DDI_SUCCESS);
}

/* remove interrupt vector */
void
px_err_rem_intr(px_fault_t *px_fault_p)
{
        px_t *px_p = DIP_TO_STATE(px_fault_p->px_fh_dip);

        px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
            IB_INTR_WAIT);

        VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
}

void
px_cb_intr_redist(void *arg)
{
        px_t *px_p = (px_t *)arg;

        px_ib_intr_dist_en(px_p->px_dip, intr_dist_cpuid(),
            px_p->px_inos[PX_INTR_XBC], B_FALSE);
}

int
px_cb_add_intr(px_fault_t *f_p)
{
        px_t *px_p = DIP_TO_STATE(f_p->px_fh_dip);

        DBG(DBG_LIB_INT, px_p->px_dip,
            "px_cb_add_intr: calling add_ivintr");

        VERIFY(add_ivintr(f_p->px_fh_sysino, PX_ERR_PIL,
            (intrfunc)f_p->px_err_func, (caddr_t)f_p, NULL,
            (caddr_t)&f_p->px_intr_payload[0]) == 0);

        intr_dist_add(px_cb_intr_redist, px_p);

        DBG(DBG_LIB_INT, px_p->px_dip,
            "px_cb_add_intr: ib_intr_enable");

        px_ib_intr_enable(px_p, intr_dist_cpuid(), f_p->px_intr_ino);

        return (DDI_SUCCESS);
}

void
px_cb_rem_intr(px_fault_t *f_p)
{
        intr_dist_rem(px_cb_intr_redist, DIP_TO_STATE(f_p->px_fh_dip));
        px_err_rem_intr(f_p);
}

#ifdef FMA
void
px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
{
        px_pec_err_t *err_pkt;

        err_pkt = (px_pec_err_t *)px_fault_p->px_intr_payload;

        /* initialize the valid-bit mask; fields are filled in below */
        rc_status->status_valid = 0;

        if (err_pkt->pec_descr.P) {
                /* PCI Status Register */
                rc_status->pci_err_status = err_pkt->pci_err_status;
                rc_status->status_valid |= PCI_ERR_STATUS_VALID;
        }

        if (err_pkt->pec_descr.E) {
                /* PCIe Status Register */
                rc_status->pcie_err_status = err_pkt->pcie_err_status;
                rc_status->status_valid |= PCIE_ERR_STATUS_VALID;
        }

        if (err_pkt->pec_descr.U) {
                rc_status->ue_status = err_pkt->ue_reg_status;
                rc_status->status_valid |= UE_STATUS_VALID;
        }

        if (err_pkt->pec_descr.H) {
                rc_status->ue_hdr1 = err_pkt->hdr[0];
                rc_status->status_valid |= UE_HDR1_VALID;
        }

        if (err_pkt->pec_descr.I) {
                rc_status->ue_hdr2 = err_pkt->hdr[1];
                rc_status->status_valid |= UE_HDR2_VALID;
        }

        /* ue_fst_err_ptr - not available for sun4v?? */

        if (err_pkt->pec_descr.S) {
                rc_status->source_id = err_pkt->err_src_reg;
                rc_status->status_valid |= SOURCE_ID_VALID;
        }

        if (err_pkt->pec_descr.R) {
                rc_status->root_err_status = err_pkt->root_err_status;
                rc_status->status_valid |= CE_STATUS_VALID;
        }
}
#endif

/*ARGSUSED*/
int
px_lib_pmctl(int cmd, px_t *px_p)
{
        return (DDI_FAILURE);
}

/*ARGSUSED*/
uint_t
px_pmeq_intr(caddr_t arg)
{
        return (DDI_INTR_CLAIMED);
}

/*
 * Unprotected raw reads/writes of fabric device's config space.
 * Only used for temporary PCI-E Fabric Error Handling.
 */
uint32_t
px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
{
        uint64_t data = 0;

        (void) hvio_config_get(px_p->px_dev_hdl,
            (bdf << PX_RA_BDF_SHIFT), offset, 4,
            (pci_cfg_data_t *)&data);

        return ((uint32_t)data);
}

void
px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
    uint32_t val)
{
        pci_cfg_data_t wdata = { 0 };

        wdata.qw = (uint32_t)val;
        (void) hvio_config_put(px_p->px_dev_hdl,
            (bdf << PX_RA_BDF_SHIFT), offset, 4, wdata);
}

/*ARGSUSED*/
int
px_lib_hotplug_init(dev_info_t *dip, void *arg)
{
        return (DDI_ENOTSUP);
}

/*ARGSUSED*/
void
px_lib_hotplug_uninit(dev_info_t *dip)
{
}

/*ARGSUSED*/
void
px_hp_intr_redist(px_t *px_p)
{
}

/* Dummy cpr add callback */
/*ARGSUSED*/
void
px_cpr_add_callb(px_t *px_p)
{
}

/* Dummy cpr rem callback */
/*ARGSUSED*/
void
px_cpr_rem_callb(px_t *px_p)
{
}

/*ARGSUSED*/
boolean_t
px_lib_is_in_drain_state(px_t *px_p)
{
        return (B_FALSE);
}

/*
 * There is no IOAPI to get the BDF of the pcie root port nexus at this moment.
 * Assume it is 0x0000, until otherwise noted. For now, all sun4v platforms
 * have programmed the BDF to be 0x0000.
 */
/*ARGSUSED*/
pcie_req_id_t
px_lib_get_bdf(px_t *px_p)
{
        return (0x0000);
}

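/*
 * Illustrative MPS negotiation sketch (the caller and fabric_mps are
 * hypothetical): boot-time fabric setup reads the root complex
 * capability, clamps it against what the fabric supports, and writes
 * the result back:
 *
 *	int mps;
 *
 *	if (px_lib_get_root_complex_mps(px_p, dip, &mps) == DDI_SUCCESS) {
 *		mps = MIN(mps, fabric_mps);
 *		(void) px_lib_set_root_complex_mps(px_p, dip, mps);
 *	}
 */
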
int
px_lib_get_root_complex_mps(px_t *px_p, dev_info_t *dip, int *mps)
{
        pci_device_t bdf = px_lib_get_bdf(px_p);

        if (hvio_get_rp_mps_cap(DIP_TO_HANDLE(dip), bdf, mps) == H_EOK)
                return (DDI_SUCCESS);
        else
                return (DDI_FAILURE);
}

int
px_lib_set_root_complex_mps(px_t *px_p, dev_info_t *dip, int mps)
{
        pci_device_t bdf = px_lib_get_bdf(px_p);

        if (hvio_set_rp_mps(DIP_TO_HANDLE(dip), bdf, mps) == H_EOK)
                return (DDI_SUCCESS);
        else
                return (DDI_FAILURE);
}