1 /* 2 * Copyright 2023 Red Hat Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 #include "priv.h" 23 24 #include <core/pci.h> 25 #include <subdev/timer.h> 26 #include <subdev/vfn.h> 27 #include <engine/fifo/chan.h> 28 #include <engine/sec2.h> 29 30 #include <nvfw/fw.h> 31 32 #include <nvrm/nvtypes.h> 33 #include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h> 34 #include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h> 35 #include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h> 36 #include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h> 37 #include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h> 38 #include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h> 39 #include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h> 40 #include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h> 41 #include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h> 42 #include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h> 43 #include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h> 44 #include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h> 45 #include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h> 46 #include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h> 47 #include <nvrm/535.113.01/nvidia/generated/g_allclasses.h> 48 #include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h> 49 #include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h> 50 #include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h> 51 #include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h> 52 #include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h> 53 #include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h> 54 #include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h> 55 56 #include <linux/acpi.h> 57 58 #define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE 59 #define GSP_MSG_MAX_SIZE GSP_PAGE_MIN_SIZE * 16 60 61 struct r535_gsp_msg { 62 u8 auth_tag_buffer[16]; 63 u8 aad_buffer[16]; 64 u32 checksum; 65 u32 sequence; 66 u32 elem_count; 67 u32 pad; 68 u8 data[]; 69 }; 70 71 #define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data) 72 73 static void * 74 r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime) 75 { 76 struct r535_gsp_msg *mqe; 77 u32 size, rptr = *gsp->msgq.rptr; 78 int used; 79 u8 *msg; 80 u32 len; 81 82 size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE); 83 if (WARN_ON(!size || size >= gsp->msgq.cnt)) 84 return ERR_PTR(-EINVAL); 85 86 do { 87 u32 wptr = *gsp->msgq.wptr; 88 89 used = wptr + gsp->msgq.cnt - rptr; 90 if (used >= gsp->msgq.cnt) 91 used -= gsp->msgq.cnt; 92 if (used >= size) 93 break; 94 95 usleep_range(1, 2); 96 } while (--(*ptime)); 97 98 if (WARN_ON(!*ptime)) 99 return ERR_PTR(-ETIMEDOUT); 100 101 mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000); 102 103 if (prepc) { 104 *prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe); 105 return mqe->data; 106 } 107 108 msg = kvmalloc(repc, GFP_KERNEL); 109 if (!msg) 110 return ERR_PTR(-ENOMEM); 111 112 len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe); 113 len = min_t(u32, repc, len); 114 memcpy(msg, mqe->data, len); 115 116 rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE); 117 if (rptr == gsp->msgq.cnt) 118 rptr = 0; 119 120 repc -= len; 121 122 if (repc) { 123 mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000); 124 memcpy(msg + len, mqe, repc); 125 126 rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE); 127 } 128 129 mb(); 130 (*gsp->msgq.rptr) = rptr; 131 return msg; 132 } 133 134 static void * 135 r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime) 136 { 137 return r535_gsp_msgq_wait(gsp, repc, NULL, ptime); 138 } 139 140 static int 141 r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv) 142 { 143 struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data); 144 struct r535_gsp_msg *cqe; 145 u32 argc = cmd->checksum; 146 u64 *ptr = (void *)cmd; 147 u64 *end; 148 u64 csum = 0; 149 int free, time = 1000000; 150 u32 wptr, size; 151 u32 off = 0; 152 153 argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE); 154 155 end = (u64 *)((char *)ptr + argc); 156 cmd->pad = 0; 157 cmd->checksum = 0; 158 cmd->sequence = gsp->cmdq.seq++; 159 cmd->elem_count = DIV_ROUND_UP(argc, 0x1000); 160 161 while (ptr < end) 162 csum ^= *ptr++; 163 164 cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum); 165 166 wptr = *gsp->cmdq.wptr; 167 do { 168 do { 169 free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1; 170 if (free >= gsp->cmdq.cnt) 171 free -= gsp->cmdq.cnt; 172 if (free >= 1) 173 break; 174 175 usleep_range(1, 2); 176 } while(--time); 177 178 if (WARN_ON(!time)) { 179 kvfree(cmd); 180 return -ETIMEDOUT; 181 } 182 183 cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000); 184 size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE); 185 memcpy(cqe, (u8 *)cmd + off, size); 186 187 wptr += DIV_ROUND_UP(size, 0x1000); 188 if (wptr == gsp->cmdq.cnt) 189 wptr = 0; 190 191 off += size; 192 argc -= size; 193 } while(argc); 194 195 nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr); 196 wmb(); 197 (*gsp->cmdq.wptr) = wptr; 198 mb(); 199 200 nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000); 201 202 kvfree(cmd); 203 return 0; 204 } 205 206 static void * 207 r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc) 208 { 209 struct r535_gsp_msg *cmd; 210 u32 size = GSP_MSG_HDR_SIZE + argc; 211 212 size = ALIGN(size, GSP_MSG_MIN_SIZE); 213 cmd = kvzalloc(size, GFP_KERNEL); 214 if (!cmd) 215 return ERR_PTR(-ENOMEM); 216 217 cmd->checksum = argc; 218 return cmd->data; 219 } 220 221 struct nvfw_gsp_rpc { 222 u32 header_version; 223 u32 signature; 224 u32 length; 225 u32 function; 226 u32 rpc_result; 227 u32 rpc_result_private; 228 u32 sequence; 229 union { 230 u32 spare; 231 u32 cpuRmGfid; 232 }; 233 u8 data[]; 234 }; 235 236 static void 237 r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg) 238 { 239 kvfree(msg); 240 } 241 242 static void 243 r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl) 244 { 245 if (gsp->subdev.debug >= lvl) { 246 nvkm_printk__(&gsp->subdev, lvl, info, 247 "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n", 248 msg->function, msg->length, msg->length - sizeof(*msg), 249 msg->rpc_result, msg->rpc_result_private); 250 print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1, 251 msg->data, msg->length - sizeof(*msg), true); 252 } 253 } 254 255 static struct nvfw_gsp_rpc * 256 r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc) 257 { 258 struct nvkm_subdev *subdev = &gsp->subdev; 259 struct nvfw_gsp_rpc *msg; 260 int time = 4000000, i; 261 u32 size; 262 263 retry: 264 msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time); 265 if (IS_ERR_OR_NULL(msg)) 266 return msg; 267 268 msg = r535_gsp_msgq_recv(gsp, msg->length, &time); 269 if (IS_ERR_OR_NULL(msg)) 270 return msg; 271 272 if (msg->rpc_result) { 273 r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR); 274 r535_gsp_msg_done(gsp, msg); 275 return ERR_PTR(-EINVAL); 276 } 277 278 r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE); 279 280 if (fn && msg->function == fn) { 281 if (repc) { 282 if (msg->length < sizeof(*msg) + repc) { 283 nvkm_error(subdev, "msg len %d < %zd\n", 284 msg->length, sizeof(*msg) + repc); 285 r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR); 286 r535_gsp_msg_done(gsp, msg); 287 return ERR_PTR(-EIO); 288 } 289 290 return msg; 291 } 292 293 r535_gsp_msg_done(gsp, msg); 294 return NULL; 295 } 296 297 for (i = 0; i < gsp->msgq.ntfy_nr; i++) { 298 struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i]; 299 300 if (ntfy->fn == msg->function) { 301 ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg)); 302 break; 303 } 304 } 305 306 if (i == gsp->msgq.ntfy_nr) 307 r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN); 308 309 r535_gsp_msg_done(gsp, msg); 310 if (fn) 311 goto retry; 312 313 if (*gsp->msgq.rptr != *gsp->msgq.wptr) 314 goto retry; 315 316 return NULL; 317 } 318 319 static int 320 r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv) 321 { 322 int ret = 0; 323 324 mutex_lock(&gsp->msgq.mutex); 325 if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) { 326 ret = -ENOSPC; 327 } else { 328 gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn; 329 gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func; 330 gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv; 331 gsp->msgq.ntfy_nr++; 332 } 333 mutex_unlock(&gsp->msgq.mutex); 334 return ret; 335 } 336 337 static int 338 r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) 339 { 340 void *repv; 341 342 mutex_lock(&gsp->cmdq.mutex); 343 repv = r535_gsp_msg_recv(gsp, fn, 0); 344 mutex_unlock(&gsp->cmdq.mutex); 345 if (IS_ERR(repv)) 346 return PTR_ERR(repv); 347 348 return 0; 349 } 350 351 static void * 352 r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) 353 { 354 struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data); 355 struct nvfw_gsp_rpc *msg; 356 u32 fn = rpc->function; 357 void *repv = NULL; 358 int ret; 359 360 if (gsp->subdev.debug >= NV_DBG_TRACE) { 361 nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function, 362 rpc->length, rpc->length - sizeof(*rpc)); 363 print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, 364 rpc->data, rpc->length - sizeof(*rpc), true); 365 } 366 367 ret = r535_gsp_cmdq_push(gsp, rpc); 368 if (ret) 369 return ERR_PTR(ret); 370 371 if (wait) { 372 msg = r535_gsp_msg_recv(gsp, fn, repc); 373 if (!IS_ERR_OR_NULL(msg)) 374 repv = msg->data; 375 else 376 repv = msg; 377 } 378 379 return repv; 380 } 381 382 static void 383 r535_gsp_event_dtor(struct nvkm_gsp_event *event) 384 { 385 struct nvkm_gsp_device *device = event->device; 386 struct nvkm_gsp_client *client = device->object.client; 387 struct nvkm_gsp *gsp = client->gsp; 388 389 mutex_lock(&gsp->client_id.mutex); 390 if (event->func) { 391 list_del(&event->head); 392 event->func = NULL; 393 } 394 mutex_unlock(&gsp->client_id.mutex); 395 396 nvkm_gsp_rm_free(&event->object); 397 event->device = NULL; 398 } 399 400 static int 401 r535_gsp_device_event_get(struct nvkm_gsp_event *event) 402 { 403 struct nvkm_gsp_device *device = event->device; 404 NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl; 405 406 ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice, 407 NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl)); 408 if (IS_ERR(ctrl)) 409 return PTR_ERR(ctrl); 410 411 ctrl->event = event->id; 412 ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; 413 return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl); 414 } 415 416 static int 417 r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, 418 nvkm_gsp_event_func func, struct nvkm_gsp_event *event) 419 { 420 struct nvkm_gsp_client *client = device->object.client; 421 struct nvkm_gsp *gsp = client->gsp; 422 NV0005_ALLOC_PARAMETERS *args; 423 int ret; 424 425 args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle, 426 NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args), 427 &event->object); 428 if (IS_ERR(args)) 429 return PTR_ERR(args); 430 431 args->hParentClient = client->object.handle; 432 args->hSrcResource = 0; 433 args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX; 434 args->notifyIndex = NV01_EVENT_CLIENT_RM | id; 435 args->data = NULL; 436 437 ret = nvkm_gsp_rm_alloc_wr(&event->object, args); 438 if (ret) 439 return ret; 440 441 event->device = device; 442 event->id = id; 443 444 ret = r535_gsp_device_event_get(event); 445 if (ret) { 446 nvkm_gsp_event_dtor(event); 447 return ret; 448 } 449 450 mutex_lock(&gsp->client_id.mutex); 451 event->func = func; 452 list_add(&event->head, &client->events); 453 mutex_unlock(&gsp->client_id.mutex); 454 return 0; 455 } 456 457 static void 458 r535_gsp_device_dtor(struct nvkm_gsp_device *device) 459 { 460 nvkm_gsp_rm_free(&device->subdevice); 461 nvkm_gsp_rm_free(&device->object); 462 } 463 464 static int 465 r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device) 466 { 467 NV2080_ALLOC_PARAMETERS *args; 468 469 return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args), 470 &device->subdevice); 471 } 472 473 static int 474 r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) 475 { 476 NV0080_ALLOC_PARAMETERS *args; 477 int ret; 478 479 args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args), 480 &device->object); 481 if (IS_ERR(args)) 482 return PTR_ERR(args); 483 484 args->hClientShare = client->object.handle; 485 486 ret = nvkm_gsp_rm_alloc_wr(&device->object, args); 487 if (ret) 488 return ret; 489 490 ret = r535_gsp_subdevice_ctor(device); 491 if (ret) 492 nvkm_gsp_rm_free(&device->object); 493 494 return ret; 495 } 496 497 static void 498 r535_gsp_client_dtor(struct nvkm_gsp_client *client) 499 { 500 struct nvkm_gsp *gsp = client->gsp; 501 502 nvkm_gsp_rm_free(&client->object); 503 504 mutex_lock(&gsp->client_id.mutex); 505 idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff); 506 mutex_unlock(&gsp->client_id.mutex); 507 508 client->gsp = NULL; 509 } 510 511 static int 512 r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) 513 { 514 NV0000_ALLOC_PARAMETERS *args; 515 int ret; 516 517 mutex_lock(&gsp->client_id.mutex); 518 ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL); 519 mutex_unlock(&gsp->client_id.mutex); 520 if (ret < 0) 521 return ret; 522 523 client->gsp = gsp; 524 client->object.client = client; 525 INIT_LIST_HEAD(&client->events); 526 527 args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args), 528 &client->object); 529 if (IS_ERR(args)) { 530 r535_gsp_client_dtor(client); 531 return ret; 532 } 533 534 args->hClient = client->object.handle; 535 args->processID = ~0; 536 537 ret = nvkm_gsp_rm_alloc_wr(&client->object, args); 538 if (ret) { 539 r535_gsp_client_dtor(client); 540 return ret; 541 } 542 543 return 0; 544 } 545 546 static int 547 r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object) 548 { 549 struct nvkm_gsp_client *client = object->client; 550 struct nvkm_gsp *gsp = client->gsp; 551 rpc_free_v03_00 *rpc; 552 553 nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n", 554 client->object.handle, object->handle); 555 556 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc)); 557 if (WARN_ON(IS_ERR_OR_NULL(rpc))) 558 return -EIO; 559 560 rpc->params.hRoot = client->object.handle; 561 rpc->params.hObjectParent = 0; 562 rpc->params.hObjectOld = object->handle; 563 return nvkm_gsp_rpc_wr(gsp, rpc, true); 564 } 565 566 static void 567 r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv) 568 { 569 rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params); 570 571 nvkm_gsp_rpc_done(object->client->gsp, rpc); 572 } 573 574 static void * 575 r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc) 576 { 577 rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params); 578 struct nvkm_gsp *gsp = object->client->gsp; 579 void *ret; 580 581 rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc); 582 if (IS_ERR_OR_NULL(rpc)) 583 return rpc; 584 585 if (rpc->status) { 586 nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status); 587 ret = ERR_PTR(-EINVAL); 588 } else { 589 ret = repc ? rpc->params : NULL; 590 } 591 592 if (IS_ERR_OR_NULL(ret)) 593 nvkm_gsp_rpc_done(gsp, rpc); 594 595 return ret; 596 } 597 598 static void * 599 r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc) 600 { 601 struct nvkm_gsp_client *client = object->client; 602 struct nvkm_gsp *gsp = client->gsp; 603 rpc_gsp_rm_alloc_v03_00 *rpc; 604 605 nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n", 606 client->object.handle, object->parent->handle, object->handle, oclass, argc); 607 608 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc); 609 if (IS_ERR(rpc)) 610 return rpc; 611 612 rpc->hClient = client->object.handle; 613 rpc->hParent = object->parent->handle; 614 rpc->hObject = object->handle; 615 rpc->hClass = oclass; 616 rpc->status = 0; 617 rpc->paramsSize = argc; 618 return rpc->params; 619 } 620 621 static void 622 r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv) 623 { 624 rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params); 625 626 nvkm_gsp_rpc_done(object->client->gsp, rpc); 627 } 628 629 static void * 630 r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc) 631 { 632 rpc_gsp_rm_control_v03_00 *rpc = container_of(argv, typeof(*rpc), params); 633 struct nvkm_gsp *gsp = object->client->gsp; 634 void *ret; 635 636 rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc); 637 if (IS_ERR_OR_NULL(rpc)) 638 return rpc; 639 640 if (rpc->status) { 641 nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", 642 object->client->object.handle, object->handle, rpc->cmd, rpc->status); 643 ret = ERR_PTR(-EINVAL); 644 } else { 645 ret = repc ? rpc->params : NULL; 646 } 647 648 if (IS_ERR_OR_NULL(ret)) 649 nvkm_gsp_rpc_done(gsp, rpc); 650 651 return ret; 652 } 653 654 static void * 655 r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc) 656 { 657 struct nvkm_gsp_client *client = object->client; 658 struct nvkm_gsp *gsp = client->gsp; 659 rpc_gsp_rm_control_v03_00 *rpc; 660 661 nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n", 662 client->object.handle, object->handle, cmd, argc); 663 664 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc); 665 if (IS_ERR(rpc)) 666 return rpc; 667 668 rpc->hClient = client->object.handle; 669 rpc->hObject = object->handle; 670 rpc->cmd = cmd; 671 rpc->status = 0; 672 rpc->paramsSize = argc; 673 return rpc->params; 674 } 675 676 static void 677 r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) 678 { 679 struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data); 680 681 r535_gsp_msg_done(gsp, rpc); 682 } 683 684 static void * 685 r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc) 686 { 687 struct nvfw_gsp_rpc *rpc; 688 689 rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64))); 690 if (IS_ERR(rpc)) 691 return ERR_CAST(rpc); 692 693 rpc->header_version = 0x03000000; 694 rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V'; 695 rpc->function = fn; 696 rpc->rpc_result = 0xffffffff; 697 rpc->rpc_result_private = 0xffffffff; 698 rpc->length = sizeof(*rpc) + argc; 699 return rpc->data; 700 } 701 702 static void * 703 r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) 704 { 705 struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data); 706 struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data); 707 const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg); 708 const u32 max_rpc_size = max_msg_size - sizeof(*rpc); 709 u32 rpc_size = rpc->length - sizeof(*rpc); 710 void *repv; 711 712 mutex_lock(&gsp->cmdq.mutex); 713 if (rpc_size > max_rpc_size) { 714 const u32 fn = rpc->function; 715 716 /* Adjust length, and send initial RPC. */ 717 rpc->length = sizeof(*rpc) + max_rpc_size; 718 cmd->checksum = rpc->length; 719 720 repv = r535_gsp_rpc_send(gsp, argv, false, 0); 721 if (IS_ERR(repv)) 722 goto done; 723 724 argv += max_rpc_size; 725 rpc_size -= max_rpc_size; 726 727 /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */ 728 while (rpc_size) { 729 u32 size = min(rpc_size, max_rpc_size); 730 void *next; 731 732 next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size); 733 if (IS_ERR(next)) { 734 repv = next; 735 goto done; 736 } 737 738 memcpy(next, argv, size); 739 740 repv = r535_gsp_rpc_send(gsp, next, false, 0); 741 if (IS_ERR(repv)) 742 goto done; 743 744 argv += size; 745 rpc_size -= size; 746 } 747 748 /* Wait for reply. */ 749 if (wait) { 750 rpc = r535_gsp_msg_recv(gsp, fn, repc); 751 if (!IS_ERR_OR_NULL(rpc)) 752 repv = rpc->data; 753 else 754 repv = rpc; 755 } else { 756 repv = NULL; 757 } 758 } else { 759 repv = r535_gsp_rpc_send(gsp, argv, wait, repc); 760 } 761 762 done: 763 mutex_unlock(&gsp->cmdq.mutex); 764 return repv; 765 } 766 767 const struct nvkm_gsp_rm 768 r535_gsp_rm = { 769 .rpc_get = r535_gsp_rpc_get, 770 .rpc_push = r535_gsp_rpc_push, 771 .rpc_done = r535_gsp_rpc_done, 772 773 .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get, 774 .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push, 775 .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done, 776 777 .rm_alloc_get = r535_gsp_rpc_rm_alloc_get, 778 .rm_alloc_push = r535_gsp_rpc_rm_alloc_push, 779 .rm_alloc_done = r535_gsp_rpc_rm_alloc_done, 780 781 .rm_free = r535_gsp_rpc_rm_free, 782 783 .client_ctor = r535_gsp_client_ctor, 784 .client_dtor = r535_gsp_client_dtor, 785 786 .device_ctor = r535_gsp_device_ctor, 787 .device_dtor = r535_gsp_device_dtor, 788 789 .event_ctor = r535_gsp_device_event_ctor, 790 .event_dtor = r535_gsp_event_dtor, 791 }; 792 793 static void 794 r535_gsp_msgq_work(struct work_struct *work) 795 { 796 struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work); 797 798 mutex_lock(&gsp->cmdq.mutex); 799 if (*gsp->msgq.rptr != *gsp->msgq.wptr) 800 r535_gsp_msg_recv(gsp, 0, 0); 801 mutex_unlock(&gsp->cmdq.mutex); 802 } 803 804 static irqreturn_t 805 r535_gsp_intr(struct nvkm_inth *inth) 806 { 807 struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth); 808 struct nvkm_subdev *subdev = &gsp->subdev; 809 u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008); 810 u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 + 811 gsp->falcon.func->riscv_irqmask); 812 u32 stat = intr & inte; 813 814 if (!stat) { 815 nvkm_debug(subdev, "inte %08x %08x\n", intr, inte); 816 return IRQ_NONE; 817 } 818 819 if (stat & 0x00000040) { 820 nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040); 821 schedule_work(&gsp->msgq.work); 822 stat &= ~0x00000040; 823 } 824 825 if (stat) { 826 nvkm_error(subdev, "intr %08x\n", stat); 827 nvkm_falcon_wr32(&gsp->falcon, 0x014, stat); 828 nvkm_falcon_wr32(&gsp->falcon, 0x004, stat); 829 } 830 831 nvkm_falcon_intr_retrigger(&gsp->falcon); 832 return IRQ_HANDLED; 833 } 834 835 static int 836 r535_gsp_intr_get_table(struct nvkm_gsp *gsp) 837 { 838 NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl; 839 int ret = 0; 840 841 ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, 842 NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl)); 843 if (IS_ERR(ctrl)) 844 return PTR_ERR(ctrl); 845 846 ctrl = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, ctrl, sizeof(*ctrl)); 847 if (WARN_ON(IS_ERR(ctrl))) 848 return PTR_ERR(ctrl); 849 850 for (unsigned i = 0; i < ctrl->tableLen; i++) { 851 enum nvkm_subdev_type type; 852 int inst; 853 854 nvkm_debug(&gsp->subdev, 855 "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i, 856 ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask, 857 ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall); 858 859 switch (ctrl->table[i].engineIdx) { 860 case MC_ENGINE_IDX_GSP: 861 type = NVKM_SUBDEV_GSP; 862 inst = 0; 863 break; 864 case MC_ENGINE_IDX_DISP: 865 type = NVKM_ENGINE_DISP; 866 inst = 0; 867 break; 868 case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9: 869 type = NVKM_ENGINE_CE; 870 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0; 871 break; 872 case MC_ENGINE_IDX_GR0: 873 type = NVKM_ENGINE_GR; 874 inst = 0; 875 break; 876 case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: 877 type = NVKM_ENGINE_NVDEC; 878 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0; 879 break; 880 case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2: 881 type = NVKM_ENGINE_NVENC; 882 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC; 883 break; 884 case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7: 885 type = NVKM_ENGINE_NVJPG; 886 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0; 887 break; 888 case MC_ENGINE_IDX_OFA0: 889 type = NVKM_ENGINE_OFA; 890 inst = 0; 891 break; 892 default: 893 continue; 894 } 895 896 if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) { 897 ret = -ENOSPC; 898 break; 899 } 900 901 gsp->intr[gsp->intr_nr].type = type; 902 gsp->intr[gsp->intr_nr].inst = inst; 903 gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall; 904 gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall; 905 gsp->intr_nr++; 906 } 907 908 nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); 909 return ret; 910 } 911 912 static int 913 r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) 914 { 915 GspStaticConfigInfo *rpc; 916 int last_usable = -1; 917 918 rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); 919 if (IS_ERR(rpc)) 920 return PTR_ERR(rpc); 921 922 gsp->internal.client.object.client = &gsp->internal.client; 923 gsp->internal.client.object.parent = NULL; 924 gsp->internal.client.object.handle = rpc->hInternalClient; 925 gsp->internal.client.gsp = gsp; 926 927 gsp->internal.device.object.client = &gsp->internal.client; 928 gsp->internal.device.object.parent = &gsp->internal.client.object; 929 gsp->internal.device.object.handle = rpc->hInternalDevice; 930 931 gsp->internal.device.subdevice.client = &gsp->internal.client; 932 gsp->internal.device.subdevice.parent = &gsp->internal.device.object; 933 gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; 934 935 gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; 936 gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; 937 938 for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) { 939 NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = 940 &rpc->fbRegionInfoParams.fbRegion[i]; 941 942 nvkm_debug(&gsp->subdev, "fb region %d: " 943 "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i, 944 reg->base, reg->limit, reg->reserved, reg->performance, 945 reg->supportCompressed, reg->supportISO, reg->bProtected); 946 947 if (!reg->reserved && !reg->bProtected) { 948 if (reg->supportCompressed && reg->supportISO && 949 !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) { 950 const u64 size = (reg->limit + 1) - reg->base; 951 952 gsp->fb.region[gsp->fb.region_nr].addr = reg->base; 953 gsp->fb.region[gsp->fb.region_nr].size = size; 954 gsp->fb.region_nr++; 955 } 956 957 last_usable = i; 958 } 959 } 960 961 if (last_usable >= 0) { 962 u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1; 963 964 gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base; 965 } 966 967 for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) { 968 if (rpc->gpcInfo.gpcMask & BIT(gpc)) { 969 gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask); 970 gsp->gr.gpcs++; 971 } 972 } 973 974 nvkm_gsp_rpc_done(gsp, rpc); 975 return 0; 976 } 977 978 static int 979 r535_gsp_postinit(struct nvkm_gsp *gsp) 980 { 981 struct nvkm_device *device = gsp->subdev.device; 982 int ret; 983 984 ret = r535_gsp_rpc_get_gsp_static_info(gsp); 985 if (WARN_ON(ret)) 986 return ret; 987 988 INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work); 989 990 ret = r535_gsp_intr_get_table(gsp); 991 if (WARN_ON(ret)) 992 return ret; 993 994 ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst); 995 if (WARN_ON(ret < 0)) 996 return ret; 997 998 ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev, 999 r535_gsp_intr, &gsp->subdev.inth); 1000 if (WARN_ON(ret)) 1001 return ret; 1002 1003 nvkm_inth_allow(&gsp->subdev.inth); 1004 nvkm_wr32(device, 0x110004, 0x00000040); 1005 return ret; 1006 } 1007 1008 static int 1009 r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend) 1010 { 1011 rpc_unloading_guest_driver_v1F_07 *rpc; 1012 1013 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc)); 1014 if (IS_ERR(rpc)) 1015 return PTR_ERR(rpc); 1016 1017 if (suspend) { 1018 rpc->bInPMTransition = 1; 1019 rpc->bGc6Entering = 0; 1020 rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; 1021 } else { 1022 rpc->bInPMTransition = 0; 1023 rpc->bGc6Entering = 0; 1024 rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0; 1025 } 1026 1027 return nvkm_gsp_rpc_wr(gsp, rpc, true); 1028 } 1029 1030 /* dword only */ 1031 struct nv_gsp_registry_entries { 1032 const char *name; 1033 u32 value; 1034 }; 1035 1036 static const struct nv_gsp_registry_entries r535_registry_entries[] = { 1037 { "RMSecBusResetEnable", 1 }, 1038 { "RMForcePcieConfigSave", 1 }, 1039 }; 1040 #define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries) 1041 1042 static int 1043 r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) 1044 { 1045 PACKED_REGISTRY_TABLE *rpc; 1046 char *strings; 1047 int str_offset; 1048 int i; 1049 size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES); 1050 1051 /* add strings + null terminator */ 1052 for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) 1053 rpc_size += strlen(r535_registry_entries[i].name) + 1; 1054 1055 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, rpc_size); 1056 if (IS_ERR(rpc)) 1057 return PTR_ERR(rpc); 1058 1059 rpc->size = sizeof(*rpc); 1060 rpc->numEntries = NV_GSP_REG_NUM_ENTRIES; 1061 1062 str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]); 1063 strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES]; 1064 for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) { 1065 int name_len = strlen(r535_registry_entries[i].name) + 1; 1066 1067 rpc->entries[i].nameOffset = str_offset; 1068 rpc->entries[i].type = 1; 1069 rpc->entries[i].data = r535_registry_entries[i].value; 1070 rpc->entries[i].length = 4; 1071 memcpy(strings, r535_registry_entries[i].name, name_len); 1072 strings += name_len; 1073 str_offset += name_len; 1074 } 1075 1076 return nvkm_gsp_rpc_wr(gsp, rpc, false); 1077 } 1078 1079 #if defined(CONFIG_ACPI) && defined(CONFIG_X86) 1080 static void 1081 r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) 1082 { 1083 const guid_t NVOP_DSM_GUID = 1084 GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B, 1085 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0); 1086 u64 NVOP_DSM_REV = 0x00000100; 1087 union acpi_object argv4 = { 1088 .buffer.type = ACPI_TYPE_BUFFER, 1089 .buffer.length = 4, 1090 .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), 1091 }, *obj; 1092 1093 caps->status = 0xffff; 1094 1095 if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) 1096 return; 1097 1098 obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); 1099 if (!obj) 1100 return; 1101 1102 printk(KERN_ERR "nvop: obj type %d\n", obj->type); 1103 printk(KERN_ERR "nvop: obj len %d\n", obj->buffer.length); 1104 1105 if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || 1106 WARN_ON(obj->buffer.length != 4)) 1107 return; 1108 1109 caps->status = 0; 1110 caps->optimusCaps = *(u32 *)obj->buffer.pointer; 1111 printk(KERN_ERR "nvop: caps %08x\n", caps->optimusCaps); 1112 1113 ACPI_FREE(obj); 1114 1115 kfree(argv4.buffer.pointer); 1116 } 1117 1118 static void 1119 r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) 1120 { 1121 const guid_t JT_DSM_GUID = 1122 GUID_INIT(0xCBECA351L, 0x067B, 0x4924, 1123 0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34); 1124 u64 JT_DSM_REV = 0x00000103; 1125 u32 caps; 1126 union acpi_object argv4 = { 1127 .buffer.type = ACPI_TYPE_BUFFER, 1128 .buffer.length = sizeof(caps), 1129 .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), 1130 }, *obj; 1131 1132 jt->status = 0xffff; 1133 1134 obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); 1135 if (!obj) 1136 return; 1137 1138 printk(KERN_ERR "jt: obj type %d\n", obj->type); 1139 printk(KERN_ERR "jt: obj len %d\n", obj->buffer.length); 1140 1141 if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || 1142 WARN_ON(obj->buffer.length != 4)) 1143 return; 1144 1145 jt->status = 0; 1146 jt->jtCaps = *(u32 *)obj->buffer.pointer; 1147 jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; 1148 jt->bSBIOSCaps = 0; 1149 printk(KERN_ERR "jt: caps %08x rev:%04x\n", jt->jtCaps, jt->jtRevId); 1150 1151 ACPI_FREE(obj); 1152 1153 kfree(argv4.buffer.pointer); 1154 } 1155 1156 static void 1157 r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, 1158 MUX_METHOD_DATA_ELEMENT *part) 1159 { 1160 acpi_handle iter = NULL, handle_mux = NULL; 1161 acpi_status status; 1162 unsigned long long value; 1163 1164 mode->status = 0xffff; 1165 part->status = 0xffff; 1166 1167 do { 1168 status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter); 1169 if (ACPI_FAILURE(status) || !iter) 1170 return; 1171 1172 status = acpi_evaluate_integer(iter, "_ADR", NULL, &value); 1173 if (ACPI_FAILURE(status) || value != id) 1174 continue; 1175 1176 handle_mux = iter; 1177 } while (!handle_mux); 1178 1179 if (!handle_mux) 1180 return; 1181 1182 status = acpi_evaluate_integer(handle_mux, "MXDM", NULL, &value); 1183 if (ACPI_SUCCESS(status)) { 1184 mode->acpiId = id; 1185 mode->mode = value; 1186 mode->status = 0; 1187 } 1188 1189 status = acpi_evaluate_integer(handle_mux, "MXDS", NULL, &value); 1190 if (ACPI_SUCCESS(status)) { 1191 part->acpiId = id; 1192 part->mode = value; 1193 part->status = 0; 1194 } 1195 } 1196 1197 static void 1198 r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux) 1199 { 1200 mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]); 1201 1202 for (int i = 0; i < mux->tableLen; i++) { 1203 r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i], 1204 &mux->acpiIdMuxPartTable[i]); 1205 } 1206 } 1207 1208 static void 1209 r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod) 1210 { 1211 acpi_status status; 1212 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 1213 union acpi_object *_DOD; 1214 1215 dod->status = 0xffff; 1216 1217 status = acpi_evaluate_object(handle, "_DOD", NULL, &output); 1218 if (ACPI_FAILURE(status)) 1219 return; 1220 1221 _DOD = output.pointer; 1222 1223 if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) || 1224 WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList))) 1225 return; 1226 1227 for (int i = 0; i < _DOD->package.count; i++) { 1228 if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER)) 1229 return; 1230 1231 dod->acpiIdList[i] = _DOD->package.elements[i].integer.value; 1232 dod->acpiIdListLen += sizeof(dod->acpiIdList[0]); 1233 } 1234 1235 printk(KERN_ERR "_DOD: ok! len:%d\n", dod->acpiIdListLen); 1236 dod->status = 0; 1237 } 1238 #endif 1239 1240 static void 1241 r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) 1242 { 1243 #if defined(CONFIG_ACPI) && defined(CONFIG_X86) 1244 acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev); 1245 1246 if (!handle) 1247 return; 1248 1249 acpi->bValid = 1; 1250 1251 r535_gsp_acpi_dod(handle, &acpi->dodMethodData); 1252 if (acpi->dodMethodData.status == 0) 1253 r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData); 1254 1255 r535_gsp_acpi_jt(handle, &acpi->jtMethodData); 1256 r535_gsp_acpi_caps(handle, &acpi->capsMethodData); 1257 #endif 1258 } 1259 1260 static int 1261 r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) 1262 { 1263 struct nvkm_device *device = gsp->subdev.device; 1264 struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device); 1265 GspSystemInfo *info; 1266 1267 if (WARN_ON(device->type == NVKM_DEVICE_TEGRA)) 1268 return -ENOSYS; 1269 1270 info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info)); 1271 if (IS_ERR(info)) 1272 return PTR_ERR(info); 1273 1274 info->gpuPhysAddr = device->func->resource_addr(device, 0); 1275 info->gpuPhysFbAddr = device->func->resource_addr(device, 1); 1276 info->gpuPhysInstAddr = device->func->resource_addr(device, 3); 1277 info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev); 1278 info->maxUserVa = TASK_SIZE; 1279 info->pciConfigMirrorBase = 0x088000; 1280 info->pciConfigMirrorSize = 0x001000; 1281 r535_gsp_acpi_info(gsp, &info->acpiMethodData); 1282 1283 return nvkm_gsp_rpc_wr(gsp, info, false); 1284 } 1285 1286 static int 1287 r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc) 1288 { 1289 struct nvkm_gsp *gsp = priv; 1290 struct nvkm_subdev *subdev = &gsp->subdev; 1291 rpc_os_error_log_v17_00 *msg = repv; 1292 1293 if (WARN_ON(repc < sizeof(*msg))) 1294 return -EINVAL; 1295 1296 nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString); 1297 return 0; 1298 } 1299 1300 static int 1301 r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) 1302 { 1303 rpc_rc_triggered_v17_02 *msg = repv; 1304 struct nvkm_gsp *gsp = priv; 1305 struct nvkm_subdev *subdev = &gsp->subdev; 1306 struct nvkm_chan *chan; 1307 unsigned long flags; 1308 1309 if (WARN_ON(repc < sizeof(*msg))) 1310 return -EINVAL; 1311 1312 nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n", 1313 msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, 1314 msg->partitionAttributionId); 1315 1316 chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags); 1317 if (!chan) { 1318 nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid); 1319 return 0; 1320 } 1321 1322 nvkm_chan_error(chan, false); 1323 nvkm_chan_put(&chan, flags); 1324 return 0; 1325 } 1326 1327 static int 1328 r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc) 1329 { 1330 struct nvkm_gsp *gsp = priv; 1331 struct nvkm_subdev *subdev = &gsp->subdev; 1332 1333 WARN_ON(repc != 0); 1334 1335 nvkm_error(subdev, "mmu fault queued\n"); 1336 return 0; 1337 } 1338 1339 static int 1340 r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc) 1341 { 1342 struct nvkm_gsp *gsp = priv; 1343 struct nvkm_gsp_client *client; 1344 struct nvkm_subdev *subdev = &gsp->subdev; 1345 rpc_post_event_v17_00 *msg = repv; 1346 1347 if (WARN_ON(repc < sizeof(*msg))) 1348 return -EINVAL; 1349 if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize)) 1350 return -EINVAL; 1351 1352 nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n", 1353 msg->hClient, msg->hEvent, msg->notifyIndex, msg->data, 1354 msg->status, msg->eventDataSize, msg->bNotifyList); 1355 1356 mutex_lock(&gsp->client_id.mutex); 1357 client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff); 1358 if (client) { 1359 struct nvkm_gsp_event *event; 1360 bool handled = false; 1361 1362 list_for_each_entry(event, &client->events, head) { 1363 if (event->object.handle == msg->hEvent) { 1364 event->func(event, msg->eventData, msg->eventDataSize); 1365 handled = true; 1366 } 1367 } 1368 1369 if (!handled) { 1370 nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n", 1371 msg->hClient, msg->hEvent); 1372 } 1373 } else { 1374 nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient); 1375 } 1376 mutex_unlock(&gsp->client_id.mutex); 1377 return 0; 1378 } 1379 1380 /** 1381 * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP 1382 * 1383 * The GSP sequencer is a list of I/O commands that the GSP can send to 1384 * the driver to perform for various purposes. The most common usage is to 1385 * perform a special mid-initialization reset. 1386 */ 1387 static int 1388 r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) 1389 { 1390 struct nvkm_gsp *gsp = priv; 1391 struct nvkm_subdev *subdev = &gsp->subdev; 1392 struct nvkm_device *device = subdev->device; 1393 rpc_run_cpu_sequencer_v17_00 *seq = repv; 1394 int ptr = 0, ret; 1395 1396 nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex); 1397 1398 while (ptr < seq->cmdIndex) { 1399 GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr]; 1400 1401 ptr += 1; 1402 ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode); 1403 1404 switch (cmd->opCode) { 1405 case GSP_SEQ_BUF_OPCODE_REG_WRITE: { 1406 u32 addr = cmd->payload.regWrite.addr; 1407 u32 data = cmd->payload.regWrite.val; 1408 1409 nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data); 1410 nvkm_wr32(device, addr, data); 1411 } 1412 break; 1413 case GSP_SEQ_BUF_OPCODE_REG_MODIFY: { 1414 u32 addr = cmd->payload.regModify.addr; 1415 u32 mask = cmd->payload.regModify.mask; 1416 u32 data = cmd->payload.regModify.val; 1417 1418 nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data); 1419 nvkm_mask(device, addr, mask, data); 1420 } 1421 break; 1422 case GSP_SEQ_BUF_OPCODE_REG_POLL: { 1423 u32 addr = cmd->payload.regPoll.addr; 1424 u32 mask = cmd->payload.regPoll.mask; 1425 u32 data = cmd->payload.regPoll.val; 1426 u32 usec = cmd->payload.regPoll.timeout ?: 4000000; 1427 //u32 error = cmd->payload.regPoll.error; 1428 1429 nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec); 1430 nvkm_rd32(device, addr); 1431 nvkm_usec(device, usec, 1432 if ((nvkm_rd32(device, addr) & mask) == data) 1433 break; 1434 ); 1435 } 1436 break; 1437 case GSP_SEQ_BUF_OPCODE_DELAY_US: { 1438 u32 usec = cmd->payload.delayUs.val; 1439 1440 nvkm_trace(subdev, "seq usec %d\n", usec); 1441 udelay(usec); 1442 } 1443 break; 1444 case GSP_SEQ_BUF_OPCODE_REG_STORE: { 1445 u32 addr = cmd->payload.regStore.addr; 1446 u32 slot = cmd->payload.regStore.index; 1447 1448 seq->regSaveArea[slot] = nvkm_rd32(device, addr); 1449 nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot, 1450 seq->regSaveArea[slot]); 1451 } 1452 break; 1453 case GSP_SEQ_BUF_OPCODE_CORE_RESET: 1454 nvkm_trace(subdev, "seq core reset\n"); 1455 nvkm_falcon_reset(&gsp->falcon); 1456 nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080); 1457 nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000); 1458 break; 1459 case GSP_SEQ_BUF_OPCODE_CORE_START: 1460 nvkm_trace(subdev, "seq core start\n"); 1461 if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040) 1462 nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002); 1463 else 1464 nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002); 1465 break; 1466 case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: 1467 nvkm_trace(subdev, "seq core wait halt\n"); 1468 nvkm_msec(device, 2000, 1469 if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010) 1470 break; 1471 ); 1472 break; 1473 case GSP_SEQ_BUF_OPCODE_CORE_RESUME: { 1474 struct nvkm_sec2 *sec2 = device->sec2; 1475 u32 mbox0; 1476 1477 nvkm_trace(subdev, "seq core resume\n"); 1478 1479 ret = gsp->func->reset(gsp); 1480 if (WARN_ON(ret)) 1481 return ret; 1482 1483 nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); 1484 nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); 1485 1486 nvkm_falcon_start(&sec2->falcon); 1487 1488 if (nvkm_msec(device, 2000, 1489 if (nvkm_rd32(device, 0x1180f8) & 0x04000000) 1490 break; 1491 ) < 0) 1492 return -ETIMEDOUT; 1493 1494 mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040); 1495 if (WARN_ON(mbox0)) { 1496 nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0); 1497 return -EIO; 1498 } 1499 1500 nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); 1501 1502 if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) 1503 return -EIO; 1504 } 1505 break; 1506 default: 1507 nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode); 1508 return -EINVAL; 1509 } 1510 } 1511 1512 return 0; 1513 } 1514 1515 static void 1516 nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) 1517 { 1518 if (mem->data) { 1519 dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); 1520 mem->data = NULL; 1521 } 1522 } 1523 1524 static int 1525 nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem) 1526 { 1527 mem->size = size; 1528 mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); 1529 if (WARN_ON(!mem->data)) 1530 return -ENOMEM; 1531 1532 return 0; 1533 } 1534 1535 1536 static int 1537 r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) 1538 { 1539 struct nvkm_subdev *subdev = &gsp->subdev; 1540 struct nvkm_device *device = subdev->device; 1541 u32 wpr2_hi; 1542 int ret; 1543 1544 wpr2_hi = nvkm_rd32(device, 0x1fa828); 1545 if (!wpr2_hi) { 1546 nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n"); 1547 return 0; 1548 } 1549 1550 ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); 1551 if (WARN_ON(ret)) 1552 return ret; 1553 1554 wpr2_hi = nvkm_rd32(device, 0x1fa828); 1555 if (WARN_ON(wpr2_hi)) 1556 return -EIO; 1557 1558 return 0; 1559 } 1560 1561 static int 1562 r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) 1563 { 1564 int ret; 1565 1566 ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); 1567 if (ret) 1568 return ret; 1569 1570 nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); 1571 1572 if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) 1573 return -EIO; 1574 1575 return 0; 1576 } 1577 1578 static int 1579 r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) 1580 { 1581 GspFwWprMeta *meta; 1582 int ret; 1583 1584 ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta); 1585 if (ret) 1586 return ret; 1587 1588 meta = gsp->wpr_meta.data; 1589 1590 meta->magic = GSP_FW_WPR_META_MAGIC; 1591 meta->revision = GSP_FW_WPR_META_REVISION; 1592 1593 meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr; 1594 meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; 1595 1596 meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; 1597 meta->sizeOfBootloader = gsp->boot.fw.size; 1598 meta->bootloaderCodeOffset = gsp->boot.code_offset; 1599 meta->bootloaderDataOffset = gsp->boot.data_offset; 1600 meta->bootloaderManifestOffset = gsp->boot.manifest_offset; 1601 1602 meta->sysmemAddrOfSignature = gsp->sig.addr; 1603 meta->sizeOfSignature = gsp->sig.size; 1604 1605 meta->gspFwRsvdStart = gsp->fb.heap.addr; 1606 meta->nonWprHeapOffset = gsp->fb.heap.addr; 1607 meta->nonWprHeapSize = gsp->fb.heap.size; 1608 meta->gspFwWprStart = gsp->fb.wpr2.addr; 1609 meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; 1610 meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; 1611 meta->gspFwOffset = gsp->fb.wpr2.elf.addr; 1612 meta->bootBinOffset = gsp->fb.wpr2.boot.addr; 1613 meta->frtsOffset = gsp->fb.wpr2.frts.addr; 1614 meta->frtsSize = gsp->fb.wpr2.frts.size; 1615 meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); 1616 meta->fbSize = gsp->fb.size; 1617 meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; 1618 meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; 1619 meta->bootCount = 0; 1620 meta->partitionRpcAddr = 0; 1621 meta->partitionRpcRequestOffset = 0; 1622 meta->partitionRpcReplyOffset = 0; 1623 meta->verified = 0; 1624 return 0; 1625 } 1626 1627 static int 1628 r535_gsp_shared_init(struct nvkm_gsp *gsp) 1629 { 1630 struct { 1631 msgqTxHeader tx; 1632 msgqRxHeader rx; 1633 } *cmdq, *msgq; 1634 int ret, i; 1635 1636 gsp->shm.cmdq.size = 0x40000; 1637 gsp->shm.msgq.size = 0x40000; 1638 1639 gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT; 1640 gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); 1641 gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); 1642 1643 ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size + 1644 gsp->shm.cmdq.size + 1645 gsp->shm.msgq.size, 1646 &gsp->shm.mem); 1647 if (ret) 1648 return ret; 1649 1650 gsp->shm.ptes.ptr = gsp->shm.mem.data; 1651 gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size; 1652 gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size; 1653 1654 for (i = 0; i < gsp->shm.ptes.nr; i++) 1655 gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT); 1656 1657 cmdq = gsp->shm.cmdq.ptr; 1658 cmdq->tx.version = 0; 1659 cmdq->tx.size = gsp->shm.cmdq.size; 1660 cmdq->tx.entryOff = GSP_PAGE_SIZE; 1661 cmdq->tx.msgSize = GSP_PAGE_SIZE; 1662 cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize; 1663 cmdq->tx.writePtr = 0; 1664 cmdq->tx.flags = 1; 1665 cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr); 1666 1667 msgq = gsp->shm.msgq.ptr; 1668 1669 gsp->cmdq.cnt = cmdq->tx.msgCount; 1670 gsp->cmdq.wptr = &cmdq->tx.writePtr; 1671 gsp->cmdq.rptr = &msgq->rx.readPtr; 1672 gsp->msgq.cnt = cmdq->tx.msgCount; 1673 gsp->msgq.wptr = &msgq->tx.writePtr; 1674 gsp->msgq.rptr = &cmdq->rx.readPtr; 1675 return 0; 1676 } 1677 1678 static int 1679 r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) 1680 { 1681 GSP_ARGUMENTS_CACHED *args; 1682 int ret; 1683 1684 if (!resume) { 1685 ret = r535_gsp_shared_init(gsp); 1686 if (ret) 1687 return ret; 1688 1689 ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs); 1690 if (ret) 1691 return ret; 1692 } 1693 1694 args = gsp->rmargs.data; 1695 args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr; 1696 args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr; 1697 args->messageQueueInitArguments.cmdQueueOffset = 1698 (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data; 1699 args->messageQueueInitArguments.statQueueOffset = 1700 (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data; 1701 1702 if (!resume) { 1703 args->srInitArguments.oldLevel = 0; 1704 args->srInitArguments.flags = 0; 1705 args->srInitArguments.bInPMTransition = 0; 1706 } else { 1707 args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; 1708 args->srInitArguments.flags = 0; 1709 args->srInitArguments.bInPMTransition = 1; 1710 } 1711 1712 return 0; 1713 } 1714 1715 static inline u64 1716 r535_gsp_libos_id8(const char *name) 1717 { 1718 u64 id = 0; 1719 1720 for (int i = 0; i < sizeof(id) && *name; i++, name++) 1721 id = (id << 8) | *name; 1722 1723 return id; 1724 } 1725 1726 /** 1727 * create_pte_array() - creates a PTE array of a physically contiguous buffer 1728 * @ptes: pointer to the array 1729 * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned) 1730 * @size: size of the buffer 1731 * 1732 * GSP-RM sometimes expects physically-contiguous buffers to have an array of 1733 * "PTEs" for each page in that buffer. Although in theory that allows for 1734 * the buffer to be physically discontiguous, GSP-RM does not currently 1735 * support that. 1736 * 1737 * In this case, the PTEs are DMA addresses of each page of the buffer. Since 1738 * the buffer is physically contiguous, calculating all the PTEs is simple 1739 * math. 1740 * 1741 * See memdescGetPhysAddrsForGpu() 1742 */ 1743 static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size) 1744 { 1745 unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE); 1746 unsigned int i; 1747 1748 for (i = 0; i < num_pages; i++) 1749 ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT); 1750 } 1751 1752 /** 1753 * r535_gsp_libos_init() -- create the libos arguments structure 1754 * 1755 * The logging buffers are byte queues that contain encoded printf-like 1756 * messages from GSP-RM. They need to be decoded by a special application 1757 * that can parse the buffers. 1758 * 1759 * The 'loginit' buffer contains logs from early GSP-RM init and 1760 * exception dumps. The 'logrm' buffer contains the subsequent logs. Both are 1761 * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE. 1762 * 1763 * The physical address map for the log buffer is stored in the buffer 1764 * itself, starting with offset 1. Offset 0 contains the "put" pointer. 1765 * 1766 * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is 1767 * configured for a larger page size (e.g. 64K pages), we need to give 1768 * the GSP an array of 4K pages. Fortunately, since the buffer is 1769 * physically contiguous, it's simple math to calculate the addresses. 1770 * 1771 * The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently 1772 * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the 1773 * buffers to be physically contiguous anyway. 1774 * 1775 * The memory allocated for the arguments must remain until the GSP sends the 1776 * init_done RPC. 1777 * 1778 * See _kgspInitLibosLoggingStructures (allocates memory for buffers) 1779 * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array) 1780 */ 1781 static int 1782 r535_gsp_libos_init(struct nvkm_gsp *gsp) 1783 { 1784 LibosMemoryRegionInitArgument *args; 1785 int ret; 1786 1787 ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos); 1788 if (ret) 1789 return ret; 1790 1791 args = gsp->libos.data; 1792 1793 ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit); 1794 if (ret) 1795 return ret; 1796 1797 args[0].id8 = r535_gsp_libos_id8("LOGINIT"); 1798 args[0].pa = gsp->loginit.addr; 1799 args[0].size = gsp->loginit.size; 1800 args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1801 args[0].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1802 create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size); 1803 1804 ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr); 1805 if (ret) 1806 return ret; 1807 1808 args[1].id8 = r535_gsp_libos_id8("LOGINTR"); 1809 args[1].pa = gsp->logintr.addr; 1810 args[1].size = gsp->logintr.size; 1811 args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1812 args[1].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1813 create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size); 1814 1815 ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm); 1816 if (ret) 1817 return ret; 1818 1819 args[2].id8 = r535_gsp_libos_id8("LOGRM"); 1820 args[2].pa = gsp->logrm.addr; 1821 args[2].size = gsp->logrm.size; 1822 args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1823 args[2].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1824 create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size); 1825 1826 ret = r535_gsp_rmargs_init(gsp, false); 1827 if (ret) 1828 return ret; 1829 1830 args[3].id8 = r535_gsp_libos_id8("RMARGS"); 1831 args[3].pa = gsp->rmargs.addr; 1832 args[3].size = gsp->rmargs.size; 1833 args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1834 args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1835 return 0; 1836 } 1837 1838 void 1839 nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt) 1840 { 1841 struct scatterlist *sgl; 1842 int i; 1843 1844 dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); 1845 1846 for_each_sgtable_sg(sgt, sgl, i) { 1847 struct page *page = sg_page(sgl); 1848 1849 __free_page(page); 1850 } 1851 1852 sg_free_table(sgt); 1853 } 1854 1855 int 1856 nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt) 1857 { 1858 const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE); 1859 struct scatterlist *sgl; 1860 int ret, i; 1861 1862 ret = sg_alloc_table(sgt, pages, GFP_KERNEL); 1863 if (ret) 1864 return ret; 1865 1866 for_each_sgtable_sg(sgt, sgl, i) { 1867 struct page *page = alloc_page(GFP_KERNEL); 1868 1869 if (!page) { 1870 nvkm_gsp_sg_free(device, sgt); 1871 return -ENOMEM; 1872 } 1873 1874 sg_set_page(sgl, page, PAGE_SIZE, 0); 1875 } 1876 1877 ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); 1878 if (ret) 1879 nvkm_gsp_sg_free(device, sgt); 1880 1881 return ret; 1882 } 1883 1884 static void 1885 nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3) 1886 { 1887 for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) 1888 nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]); 1889 } 1890 1891 /** 1892 * nvkm_gsp_radix3_sg - build a radix3 table from a S/G list 1893 * 1894 * The GSP uses a three-level page table, called radix3, to map the firmware. 1895 * Each 64-bit "pointer" in the table is either the bus address of an entry in 1896 * the next table (for levels 0 and 1) or the bus address of the next page in 1897 * the GSP firmware image itself. 1898 * 1899 * Level 0 contains a single entry in one page that points to the first page 1900 * of level 1. 1901 * 1902 * Level 1, since it's also only one page in size, contains up to 512 entries, 1903 * one for each page in Level 2. 1904 * 1905 * Level 2 can be up to 512 pages in size, and each of those entries points to 1906 * the next page of the firmware image. Since there can be up to 512*512 1907 * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB. 1908 * 1909 * Internally, the GSP has its window into system memory, but the base 1910 * physical address of the aperture is not 0. In fact, it varies depending on 1911 * the GPU architecture. Since the GPU is a PCI device, this window is 1912 * accessed via DMA and is therefore bound by IOMMU translation. The end 1913 * result is that GSP-RM must translate the bus addresses in the table to GSP 1914 * physical addresses. All this should happen transparently. 1915 * 1916 * Returns 0 on success, or negative error code 1917 * 1918 * See kgspCreateRadix3_IMPL 1919 */ 1920 static int 1921 nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size, 1922 struct nvkm_gsp_radix3 *rx3) 1923 { 1924 u64 addr; 1925 1926 for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) { 1927 u64 *ptes; 1928 int idx; 1929 1930 rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); 1931 rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size, 1932 &rx3->mem[i].addr, GFP_KERNEL); 1933 if (WARN_ON(!rx3->mem[i].data)) 1934 return -ENOMEM; 1935 1936 ptes = rx3->mem[i].data; 1937 if (i == 2) { 1938 struct scatterlist *sgl; 1939 1940 for_each_sgtable_dma_sg(sgt, sgl, idx) { 1941 for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++) 1942 *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j); 1943 } 1944 } else { 1945 for (int j = 0; j < size / GSP_PAGE_SIZE; j++) 1946 *ptes++ = addr + GSP_PAGE_SIZE * j; 1947 } 1948 1949 size = rx3->mem[i].size; 1950 addr = rx3->mem[i].addr; 1951 } 1952 1953 return 0; 1954 } 1955 1956 int 1957 r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) 1958 { 1959 u32 mbox0 = 0xff, mbox1 = 0xff; 1960 int ret; 1961 1962 if (!gsp->running) 1963 return 0; 1964 1965 if (suspend) { 1966 GspFwWprMeta *meta = gsp->wpr_meta.data; 1967 u64 len = meta->gspFwWprEnd - meta->gspFwWprStart; 1968 GspFwSRMeta *sr; 1969 1970 ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt); 1971 if (ret) 1972 return ret; 1973 1974 ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3); 1975 if (ret) 1976 return ret; 1977 1978 ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta); 1979 if (ret) 1980 return ret; 1981 1982 sr = gsp->sr.meta.data; 1983 sr->magic = GSP_FW_SR_META_MAGIC; 1984 sr->revision = GSP_FW_SR_META_REVISION; 1985 sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr; 1986 sr->sizeOfSuspendResumeData = len; 1987 1988 mbox0 = lower_32_bits(gsp->sr.meta.addr); 1989 mbox1 = upper_32_bits(gsp->sr.meta.addr); 1990 } 1991 1992 ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); 1993 if (WARN_ON(ret)) 1994 return ret; 1995 1996 nvkm_msec(gsp->subdev.device, 2000, 1997 if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000) 1998 break; 1999 ); 2000 2001 nvkm_falcon_reset(&gsp->falcon); 2002 2003 ret = nvkm_gsp_fwsec_sb(gsp); 2004 WARN_ON(ret); 2005 2006 ret = r535_gsp_booter_unload(gsp, mbox0, mbox1); 2007 WARN_ON(ret); 2008 2009 gsp->running = false; 2010 return 0; 2011 } 2012 2013 int 2014 r535_gsp_init(struct nvkm_gsp *gsp) 2015 { 2016 u32 mbox0, mbox1; 2017 int ret; 2018 2019 if (!gsp->sr.meta.data) { 2020 mbox0 = lower_32_bits(gsp->wpr_meta.addr); 2021 mbox1 = upper_32_bits(gsp->wpr_meta.addr); 2022 } else { 2023 r535_gsp_rmargs_init(gsp, true); 2024 2025 mbox0 = lower_32_bits(gsp->sr.meta.addr); 2026 mbox1 = upper_32_bits(gsp->sr.meta.addr); 2027 } 2028 2029 /* Execute booter to handle (eventually...) booting GSP-RM. */ 2030 ret = r535_gsp_booter_load(gsp, mbox0, mbox1); 2031 if (WARN_ON(ret)) 2032 goto done; 2033 2034 ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE); 2035 if (ret) 2036 goto done; 2037 2038 gsp->running = true; 2039 2040 done: 2041 if (gsp->sr.meta.data) { 2042 nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta); 2043 nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); 2044 nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); 2045 return ret; 2046 } 2047 2048 if (ret == 0) 2049 ret = r535_gsp_postinit(gsp); 2050 2051 return ret; 2052 } 2053 2054 static int 2055 r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp) 2056 { 2057 const struct firmware *fw = gsp->fws.bl; 2058 const struct nvfw_bin_hdr *hdr; 2059 RM_RISCV_UCODE_DESC *desc; 2060 int ret; 2061 2062 hdr = nvfw_bin_hdr(&gsp->subdev, fw->data); 2063 desc = (void *)fw->data + hdr->header_offset; 2064 2065 ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw); 2066 if (ret) 2067 return ret; 2068 2069 memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size); 2070 2071 gsp->boot.code_offset = desc->monitorCodeOffset; 2072 gsp->boot.data_offset = desc->monitorDataOffset; 2073 gsp->boot.manifest_offset = desc->manifestOffset; 2074 gsp->boot.app_version = desc->appVersion; 2075 return 0; 2076 } 2077 2078 static const struct nvkm_firmware_func 2079 r535_gsp_fw = { 2080 .type = NVKM_FIRMWARE_IMG_SGT, 2081 }; 2082 2083 static int 2084 r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize) 2085 { 2086 const u8 *img = gsp->fws.rm->data; 2087 const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img; 2088 const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff]; 2089 const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset]; 2090 2091 for (int i = 0; i < ehdr->e_shnum; i++, shdr++) { 2092 if (!strcmp(&names[shdr->sh_name], name)) { 2093 *pdata = &img[shdr->sh_offset]; 2094 *psize = shdr->sh_size; 2095 return 0; 2096 } 2097 } 2098 2099 nvkm_error(&gsp->subdev, "section '%s' not found\n", name); 2100 return -ENOENT; 2101 } 2102 2103 static void 2104 r535_gsp_dtor_fws(struct nvkm_gsp *gsp) 2105 { 2106 nvkm_firmware_put(gsp->fws.bl); 2107 gsp->fws.bl = NULL; 2108 nvkm_firmware_put(gsp->fws.booter.unload); 2109 gsp->fws.booter.unload = NULL; 2110 nvkm_firmware_put(gsp->fws.booter.load); 2111 gsp->fws.booter.load = NULL; 2112 nvkm_firmware_put(gsp->fws.rm); 2113 gsp->fws.rm = NULL; 2114 } 2115 2116 void 2117 r535_gsp_dtor(struct nvkm_gsp *gsp) 2118 { 2119 idr_destroy(&gsp->client_id.idr); 2120 mutex_destroy(&gsp->client_id.mutex); 2121 2122 nvkm_gsp_radix3_dtor(gsp, &gsp->radix3); 2123 nvkm_gsp_mem_dtor(gsp, &gsp->sig); 2124 nvkm_firmware_dtor(&gsp->fw); 2125 2126 nvkm_falcon_fw_dtor(&gsp->booter.unload); 2127 nvkm_falcon_fw_dtor(&gsp->booter.load); 2128 2129 mutex_destroy(&gsp->msgq.mutex); 2130 mutex_destroy(&gsp->cmdq.mutex); 2131 2132 r535_gsp_dtor_fws(gsp); 2133 } 2134 2135 int 2136 r535_gsp_oneinit(struct nvkm_gsp *gsp) 2137 { 2138 struct nvkm_device *device = gsp->subdev.device; 2139 const u8 *data; 2140 u64 size; 2141 int ret; 2142 2143 mutex_init(&gsp->cmdq.mutex); 2144 mutex_init(&gsp->msgq.mutex); 2145 2146 ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load, 2147 &device->sec2->falcon, &gsp->booter.load); 2148 if (ret) 2149 return ret; 2150 2151 ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload, 2152 &device->sec2->falcon, &gsp->booter.unload); 2153 if (ret) 2154 return ret; 2155 2156 /* Load GSP firmware from ELF image into DMA-accessible memory. */ 2157 ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size); 2158 if (ret) 2159 return ret; 2160 2161 ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw); 2162 if (ret) 2163 return ret; 2164 2165 /* Load relevant signature from ELF image. */ 2166 ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size); 2167 if (ret) 2168 return ret; 2169 2170 ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig); 2171 if (ret) 2172 return ret; 2173 2174 memcpy(gsp->sig.data, data, size); 2175 2176 /* Build radix3 page table for ELF image. */ 2177 ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); 2178 if (ret) 2179 return ret; 2180 2181 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER, 2182 r535_gsp_msg_run_cpu_sequencer, gsp); 2183 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp); 2184 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, 2185 r535_gsp_msg_rc_triggered, gsp); 2186 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, 2187 r535_gsp_msg_mmu_fault_queued, gsp); 2188 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp); 2189 2190 ret = r535_gsp_rm_boot_ctor(gsp); 2191 if (ret) 2192 return ret; 2193 2194 /* Release FW images - we've copied them to DMA buffers now. */ 2195 r535_gsp_dtor_fws(gsp); 2196 2197 /* Calculate FB layout. */ 2198 gsp->fb.wpr2.frts.size = 0x100000; 2199 gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size; 2200 2201 gsp->fb.wpr2.boot.size = gsp->boot.fw.size; 2202 gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000); 2203 2204 gsp->fb.wpr2.elf.size = gsp->fw.len; 2205 gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000); 2206 2207 { 2208 u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30); 2209 2210 gsp->fb.wpr2.heap.size = 2211 gsp->func->wpr_heap.os_carveout_size + 2212 gsp->func->wpr_heap.base_size + 2213 ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) + 2214 ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20); 2215 2216 gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size); 2217 } 2218 2219 gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000); 2220 gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000); 2221 2222 gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000); 2223 gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr; 2224 2225 gsp->fb.heap.size = 0x100000; 2226 gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size; 2227 2228 ret = nvkm_gsp_fwsec_frts(gsp); 2229 if (WARN_ON(ret)) 2230 return ret; 2231 2232 ret = r535_gsp_libos_init(gsp); 2233 if (WARN_ON(ret)) 2234 return ret; 2235 2236 ret = r535_gsp_wpr_meta_init(gsp); 2237 if (WARN_ON(ret)) 2238 return ret; 2239 2240 ret = r535_gsp_rpc_set_system_info(gsp); 2241 if (WARN_ON(ret)) 2242 return ret; 2243 2244 ret = r535_gsp_rpc_set_registry(gsp); 2245 if (WARN_ON(ret)) 2246 return ret; 2247 2248 /* Reset GSP into RISC-V mode. */ 2249 ret = gsp->func->reset(gsp); 2250 if (WARN_ON(ret)) 2251 return ret; 2252 2253 nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); 2254 nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); 2255 2256 mutex_init(&gsp->client_id.mutex); 2257 idr_init(&gsp->client_id.idr); 2258 return 0; 2259 } 2260 2261 static int 2262 r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver, 2263 const struct firmware **pfw) 2264 { 2265 char fwname[64]; 2266 2267 snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver); 2268 return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw); 2269 } 2270 2271 int 2272 r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) 2273 { 2274 struct nvkm_subdev *subdev = &gsp->subdev; 2275 int ret; 2276 2277 if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable)) 2278 return -EINVAL; 2279 2280 if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) || 2281 (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) || 2282 (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) || 2283 (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) { 2284 r535_gsp_dtor_fws(gsp); 2285 return ret; 2286 } 2287 2288 return 0; 2289 } 2290 2291 #define NVKM_GSP_FIRMWARE(chip) \ 2292 MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \ 2293 MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \ 2294 MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \ 2295 MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin") 2296 2297 NVKM_GSP_FIRMWARE(tu102); 2298 NVKM_GSP_FIRMWARE(tu104); 2299 NVKM_GSP_FIRMWARE(tu106); 2300 2301 NVKM_GSP_FIRMWARE(tu116); 2302 NVKM_GSP_FIRMWARE(tu117); 2303 2304 NVKM_GSP_FIRMWARE(ga100); 2305 2306 NVKM_GSP_FIRMWARE(ga102); 2307 NVKM_GSP_FIRMWARE(ga103); 2308 NVKM_GSP_FIRMWARE(ga104); 2309 NVKM_GSP_FIRMWARE(ga106); 2310 NVKM_GSP_FIRMWARE(ga107); 2311 2312 NVKM_GSP_FIRMWARE(ad102); 2313 NVKM_GSP_FIRMWARE(ad103); 2314 NVKM_GSP_FIRMWARE(ad104); 2315 NVKM_GSP_FIRMWARE(ad106); 2316 NVKM_GSP_FIRMWARE(ad107); 2317