1 /*
2 * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 * SPDX-License-Identifier: MIT
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /*!
25 *
26 * @file kern_fsp.c
27 * @brief Provides all kernel side interfaces for tracking the FSP state,
28 * submitting commands and parsing replies needed from the CPU.
29 */
30 #include "gpu/gpu.h"
31 #include "gpu/fsp/kern_fsp.h"
32 #include "nvrm_registry.h"
33
34 #if RMCFG_MODULE_ENABLED (GSP)
35 #include "gpu/gsp/gsp.h"
36 #include "objflcnable.h"
37 #endif
38
39 /*!
40 * Local object related functions
41 */
42 static void kfspInitRegistryOverrides(OBJGPU *, KernelFsp *);
43
44 static NV_STATUS kfspReadMessage(OBJGPU *pGpu, KernelFsp *pKernelFsp, NvU8 *pPayloadBuffer, NvU32 payloadBufferSize);
45
46 NV_STATUS
kfspConstructEngine_IMPL(OBJGPU * pGpu,KernelFsp * pKernelFsp,ENGDESCRIPTOR engDesc)47 kfspConstructEngine_IMPL(OBJGPU *pGpu, KernelFsp *pKernelFsp, ENGDESCRIPTOR engDesc)
48 {
49
50 // Initialize based on registry keys
51 kfspInitRegistryOverrides(pGpu, pKernelFsp);
52 if (pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_IS_MISSING))
53 {
54 NV_PRINTF(LEVEL_WARNING, "KernelFsp is disabled\n");
55 return NV_ERR_OBJECT_NOT_FOUND;
56 }
57 return NV_OK;
58 }
59
60 /*!
61 * Initialize all registry overrides for this object
62 *
63 * @param[in] pGpu GPU object pointer
64 * @param[in] pKernelFsp KernelFsp object pointer
65 */
66 static void
kfspInitRegistryOverrides(OBJGPU * pGpu,KernelFsp * pKernelFsp)67 kfspInitRegistryOverrides
68 (
69 OBJGPU *pGpu,
70 KernelFsp *pKernelFsp
71 )
72 {
73 NvU32 data = 0;
74
75 if (((osReadRegistryDword(pGpu, NV_REG_STR_RM_DISABLE_FSP, &data) == NV_OK) &&
76 (data == NV_REG_STR_RM_DISABLE_FSP_YES) && IS_EMULATION(pGpu)) ||
77 IS_FMODEL(pGpu) || IS_RTLSIM(pGpu))
78 {
79 //
80 // Force disable FSP engine, used only on emulation because some
81 // emulation netlists stub out FSP but leave the engine in PTOP
82 //
83 NV_PRINTF(LEVEL_WARNING, "FSP disabled due to regkey override.\n");
84 pKernelFsp->setProperty(pKernelFsp, PDB_PROP_KFSP_IS_MISSING, NV_TRUE);
85 }
86
87 if (osReadRegistryDword(pGpu, NV_REG_STR_RM_DISABLE_COT_CMD, &data) == NV_OK)
88 {
89 // Assume non-zero value only has NV_REG_STR_RM_DISABLE_COT_CMD_YES
90 if (data & DRF_SHIFTMASK(NV_REG_STR_RM_DISABLE_COT_CMD_FRTS_SYSMEM))
91 {
92 pKernelFsp->setProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_FRTS_SYSMEM, NV_TRUE);
93 }
94
95 if (data & DRF_SHIFTMASK(NV_REG_STR_RM_DISABLE_COT_CMD_FRTS_VIDMEM))
96 {
97 pKernelFsp->setProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_FRTS_VIDMEM, NV_TRUE);
98 }
99
100 if (data & DRF_SHIFTMASK(NV_REG_STR_RM_DISABLE_COT_CMD_GSPFMC))
101 {
102 pKernelFsp->setProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_GSPFMC, NV_TRUE);
103 }
104 }
105
106 // Inst-in-sys must only set up FRTS in SYSMEM. This includes FB broken and cache only.
107 if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM) ||
108 pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) ||
109 gpuIsCacheOnlyModeEnabled(pGpu))
110 {
111 pKernelFsp->setProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_FRTS_VIDMEM, NV_TRUE);
112 }
113 }
114
115
116 /*!
117 * @brief FSP State Initialization.
118 *
119 * Initializes all software states including allocating the dbg memory surface
120 * and the initialization of FSP HAL layer.
121 *
122 * @param[in] pGpu GPU object pointer
123 * @param[in] pKernelFsp FSP object pointer
124 *
125 * @return 'NV_OK' if state-initialization was successful.
126 * @return other bubbles up errors from @ref kfspStateInitHal_HAL on failure
127 */
NV_STATUS
kfspStateInitUnlocked_IMPL
(
    OBJGPU *pGpu,
    KernelFsp *pKernelFsp
)
{
    // No software state currently needs setup at this stage; the stub is
    // kept so the engine participates in the standard state-init sequence.
    return NV_OK;
}
137
138 /*!
139 * @brief Clean up objects used when sending GSP-FMC and FRTS info to FSP
140 *
141 * @param[in] pGpu GPU object pointer
142 * @param[in] pKernelFsp FSP object pointer
143 */
void
kfspCleanupBootState_IMPL
(
    OBJGPU *pGpu,
    KernelFsp *pKernelFsp
)
{
    // Release the Chain-Of-Trust command payload staging buffer.
    portMemFree(pKernelFsp->pCotPayload);
    pKernelFsp->pCotPayload = NULL;

    if (pKernelFsp->pSysmemFrtsMemdesc != NULL)
    {
        // Notify the HAL that the sysmem FRTS location is being torn down
        // before the backing memory goes away.
        kfspFrtsSysmemLocationClear_HAL(pGpu, pKernelFsp);
        // Drop the kernel CPU mapping first, then release the backing
        // allocation and finally the descriptor itself.
        memdescUnmap(pKernelFsp->pSysmemFrtsMemdesc, NV_TRUE, 0,
                     memdescGetKernelMapping(pKernelFsp->pSysmemFrtsMemdesc),
                     memdescGetKernelMappingPriv(pKernelFsp->pSysmemFrtsMemdesc));
        memdescFree(pKernelFsp->pSysmemFrtsMemdesc);
        memdescDestroy(pKernelFsp->pSysmemFrtsMemdesc);
        pKernelFsp->pSysmemFrtsMemdesc = NULL;
    }

    // GSP-FMC image memory (no CPU mapping to undo, unlike sysmem FRTS).
    if (pKernelFsp->pGspFmcMemdesc != NULL)
    {
        memdescFree(pKernelFsp->pGspFmcMemdesc);
        memdescDestroy(pKernelFsp->pGspFmcMemdesc);
        pKernelFsp->pGspFmcMemdesc = NULL;
    }

    // GSP boot arguments memory.
    if (pKernelFsp->pGspBootArgsMemdesc != NULL)
    {
        memdescFree(pKernelFsp->pGspBootArgsMemdesc);
        memdescDestroy(pKernelFsp->pGspBootArgsMemdesc);
        pKernelFsp->pGspBootArgsMemdesc = NULL;
    }

}
180
181 /*!
182 * @brief Destroy FSP state
183 *
184 * @param[in] pGpu GPU object pointer
185 * @param[in] pKernelFsp FSP object pointer
186 */
void
kfspStateDestroy_IMPL
(
    OBJGPU *pGpu,
    KernelFsp *pKernelFsp
)
{
    // Boot-time objects (COT payload, sysmem FRTS, GSP-FMC, boot args) are
    // released by the shared cleanup path.
    kfspCleanupBootState(pGpu, pKernelFsp);

    // The vidmem FRTS descriptor is not freed in kfspCleanupBootState, so it
    // is released here when the whole engine state is destroyed.
    if (pKernelFsp->pVidmemFrtsMemdesc != NULL)
    {
        memdescFree(pKernelFsp->pVidmemFrtsMemdesc);
        memdescDestroy(pKernelFsp->pVidmemFrtsMemdesc);
        pKernelFsp->pVidmemFrtsMemdesc = NULL;
    }
}
203
204 /*!
205 * @brief Override default behaviour of reset
206 *
207 * @param[in] pGpu GPU object pointer
208 * @param[in] pKernelFsp FSP object pointer
209 */
210 void
kfspSecureReset_IMPL(OBJGPU * pGpu,KernelFsp * pKernelFsp)211 kfspSecureReset_IMPL
212 (
213 OBJGPU *pGpu,
214 KernelFsp *pKernelFsp
215 )
216 {
217 // Should not reset FSP
218 NV_PRINTF(LEVEL_ERROR, "FSP cannot be reset by CPU.\n");
219 NV_ASSERT(0);
220 return;
221 }
222
223
224 /*!
225 * @brief Check if FSP RM command queue is empty
226 *
227 * @param[in] pGpu OBJGPU pointer
228 * @param[in] pKernelFsp KernelFsp pointer
229 *
230 * @return NV_TRUE if queue is empty, NV_FALSE otherwise
231 */
232 NvBool
kfspIsQueueEmpty_IMPL(OBJGPU * pGpu,KernelFsp * pKernelFsp)233 kfspIsQueueEmpty_IMPL
234 (
235 OBJGPU *pGpu,
236 KernelFsp *pKernelFsp
237 )
238 {
239 NvU32 cmdqHead, cmdqTail;
240
241 kfspGetQueueHeadTail_HAL(pGpu, pKernelFsp, &cmdqHead, &cmdqTail);
242
243 // FSP will set QUEUE_HEAD = TAIL after each packet is received
244 return (cmdqHead == cmdqTail);
245 }
246
247 /*!
248 * @brief Wait for FSP RM command queue to be empty
249 *
250 * @param[in] pGpu OBJGPU pointer
251 * @param[in] pKernelFsp KernelFsp pointer
252 *
253 * @return NV_OK, or NV_ERR_TIMEOUT
254 */
255 NV_STATUS
kfspPollForQueueEmpty_IMPL(OBJGPU * pGpu,KernelFsp * pKernelFsp)256 kfspPollForQueueEmpty_IMPL
257 (
258 OBJGPU *pGpu,
259 KernelFsp *pKernelFsp
260 )
261 {
262 NV_STATUS status = NV_OK;
263 RMTIMEOUT timeout;
264
265 gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout,
266 GPU_TIMEOUT_FLAGS_OSTIMER |
267 GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE);
268
269 while (!kfspIsQueueEmpty(pGpu, pKernelFsp))
270 {
271 //
272 // For now we assume that any response from FSP before RM message
273 // send is complete indicates an error and we should abort.
274 //
275 // Ongoing dicussion on usefullness of this check. Bug to be filed.
276 //
277 if (!kfspIsMsgQueueEmpty(pGpu, pKernelFsp))
278 {
279 kfspReadMessage(pGpu, pKernelFsp, NULL, 0);
280 NV_PRINTF(LEVEL_ERROR,
281 "Received error message from FSP while waiting for CMDQ to be empty.\n");
282 status = NV_ERR_GENERIC;
283 break;
284 }
285
286 osSpinLoop();
287
288 status = gpuCheckTimeout(pGpu, &timeout);
289 if (status != NV_OK)
290 {
291 if ((status == NV_ERR_TIMEOUT) &&
292 kfspIsQueueEmpty(pGpu, pKernelFsp))
293 {
294 status = NV_OK;
295 }
296 else
297 {
298 NV_PRINTF(LEVEL_ERROR,
299 "Timed out waiting for FSP command queue to be empty.\n");
300 }
301 break;
302 }
303 }
304
305 return status;
306 }
307
308 /*!
309 * @brief Check if FSP RM message queue is empty
310 *
311 * @param[in] pGpu OBJGPU pointer
312 * @param[in] pKernelFsp KernelFsp pointer
313 *
314 * @return NV_TRUE if queue is empty, NV_FALSE otherwise
315 */
316 NvBool
kfspIsMsgQueueEmpty_IMPL(OBJGPU * pGpu,KernelFsp * pKernelFsp)317 kfspIsMsgQueueEmpty_IMPL
318 (
319 OBJGPU *pGpu,
320 KernelFsp *pKernelFsp
321 )
322 {
323 NvU32 msgqHead, msgqTail;
324
325 kfspGetMsgQueueHeadTail_HAL(pGpu, pKernelFsp, &msgqHead, &msgqTail);
326 return (msgqHead == msgqTail);
327 }
328
329 /*!
330 * @brief Poll for response from FSP via RM message queue
331 *
332 * @param[in] pGpu OBJGPU pointer
333 * @param[in] pKernelFsp KernelFsp pointer
334 *
335 * @return NV_OK, or NV_ERR_TIMEOUT
336 */
337 NV_STATUS
kfspPollForResponse_IMPL(OBJGPU * pGpu,KernelFsp * pKernelFsp)338 kfspPollForResponse_IMPL
339 (
340 OBJGPU *pGpu,
341 KernelFsp *pKernelFsp
342 )
343 {
344 RMTIMEOUT timeout;
345 NV_STATUS status = NV_OK;
346
347 // Poll for message queue to wait for FSP's reply
348 gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, GPU_TIMEOUT_FLAGS_OSTIMER | GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE);
349 while (kfspIsMsgQueueEmpty(pGpu, pKernelFsp))
350 {
351 if (gpuCheckTimeout(pGpu, &timeout) == NV_ERR_TIMEOUT)
352 {
353 NV_PRINTF(LEVEL_ERROR, "FSP command timed out\n");
354 return NV_ERR_TIMEOUT;
355 }
356
357 osSpinLoop();
358 }
359
360 return status;
361 }
362
363 /*!
364 * @brief Read and process message from FSP via RM message queue.
365 *
366 * Supports both single and multi-packet message. For multi-packet messages, this
367 * loops until all packets are received, polling at each iteration for the next
368 * packet to come in. If a buffer is provided, the message payload will be
369 * returned there.
370 *
371 * @note: For multi-packet messages, a buffer in which the message payload will
372 * be reconstructed must be provided.
373 *
374 * @param[in] pGpu OBJGPU pointer
375 * @param[in] pKernelFsp KernelFsp pointer
376 * @param[in/out] pPayloadBuffer Buffer in which to return message payload
377 * @param[in] payloadBufferSize Payload buffer size
378 *
379 * @return NV_OK, NV_ERR_INVALID_DATA, NV_ERR_INSUFFICIENT_RESOURCES, or errors
380 * from functions called within
381 */
static NV_STATUS
kfspReadMessage
(
    OBJGPU *pGpu,
    KernelFsp *pKernelFsp,
    NvU8 *pPayloadBuffer,
    NvU32 payloadBufferSize
)
{
    NvU8 *pPacketBuffer;
    NV_STATUS status;
    NvU32 totalPayloadSize = 0;
    // Start state guarantees at least one loop iteration below.
    MCTP_PACKET_STATE packetState = MCTP_PACKET_STATE_START;

    // Nothing pending from FSP: treat as benign and return success.
    if (kfspIsMsgQueueEmpty(pGpu, pKernelFsp))
    {
        NV_PRINTF(LEVEL_WARNING, "Tried to read FSP response but MSG queue is empty\n");
        return NV_OK;
    }

    // Scratch buffer sized to hold one full channel's worth of packet data.
    pPacketBuffer = portMemAllocNonPaged(kfspGetRmChannelSize_HAL(pGpu, pKernelFsp));
    NV_CHECK_OR_RETURN(LEVEL_ERROR, pPacketBuffer != NULL, NV_ERR_NO_MEMORY);

    // Loop one packet per iteration until the terminal packet (END of a
    // multi-packet message, or a SINGLE_PACKET message) is consumed.
    while ((packetState != MCTP_PACKET_STATE_END) && (packetState != MCTP_PACKET_STATE_SINGLE_PACKET))
    {
        NvU32 msgqHead, msgqTail;
        NvU32 packetSize;
        NvU32 curPayloadSize;
        NvU8 curHeaderSize;
        NvU8 tag;

        // Wait for next packet
        status = kfspPollForResponse(pGpu, pKernelFsp);
        if (status != NV_OK)
        {
            goto done;
        }

        kfspGetMsgQueueHeadTail_HAL(pGpu, pKernelFsp, &msgqHead, &msgqTail);

        // Tail points to last DWORD in packet, not DWORD immediately following it
        packetSize = (msgqTail - msgqHead) + sizeof(NvU32);

        // Sanity-check against a corrupt or wrapped head/tail pair before
        // using packetSize as a read length.
        if ((packetSize < sizeof(NvU32)) ||
            (packetSize > kfspGetRmChannelSize_HAL(pGpu, pKernelFsp)))
        {
            NV_PRINTF(LEVEL_ERROR, "FSP response packet is invalid size: size=0x%x bytes\n", packetSize);
            status = NV_ERR_INVALID_DATA;
            goto done;
        }

        kfspReadFromEmem_HAL(pGpu, pKernelFsp, pPacketBuffer, packetSize);

        // Parse the MCTP transport header; advances packetState (START ->
        // INTERMEDIATE -> END, or SINGLE_PACKET) and validates the packet.
        status = kfspGetPacketInfo_HAL(pGpu, pKernelFsp, pPacketBuffer, packetSize, &packetState, &tag);
        if (status != NV_OK)
        {
            goto done;
        }

        if ((packetState == MCTP_PACKET_STATE_START) || (packetState == MCTP_PACKET_STATE_SINGLE_PACKET))
        {
            // Packet contains payload header
            curHeaderSize = sizeof(MCTP_HEADER);
        }
        else
        {
            // Continuation packets carry only the DWORD transport header.
            curHeaderSize = sizeof(NvU32);
        }

        curPayloadSize = packetSize - curHeaderSize;

        // Multi-packet reassembly needs somewhere to accumulate the payload;
        // without a caller buffer we cannot reconstruct the message.
        if ((pPayloadBuffer == NULL) && (packetState != MCTP_PACKET_STATE_SINGLE_PACKET))
        {
            NV_PRINTF(LEVEL_ERROR, "No buffer provided when receiving multi-packet message. Buffer needed to reconstruct message\n");
            status = NV_ERR_INSUFFICIENT_RESOURCES;
            goto done;
        }

        if (pPayloadBuffer != NULL)
        {
            // Guard against overflowing the caller's buffer as packets
            // accumulate.
            if (payloadBufferSize < (totalPayloadSize + curPayloadSize))
            {
                NV_PRINTF(LEVEL_ERROR, "Buffer provided for message payload too small. Payload size: 0x%x Buffer size: 0x%x\n",
                          totalPayloadSize + curPayloadSize, payloadBufferSize);
                status = NV_ERR_INSUFFICIENT_RESOURCES;
                goto done;
            }
            // Append this packet's payload (header stripped) to the caller's
            // reassembly buffer.
            portMemCopy(pPayloadBuffer + totalPayloadSize, payloadBufferSize - totalPayloadSize,
                        pPacketBuffer + curHeaderSize, curPayloadSize);
        }
        totalPayloadSize += curPayloadSize;

        // Set TAIL = HEAD to indicate CPU received message
        kfspUpdateMsgQueueHeadTail_HAL(pGpu, pKernelFsp, msgqHead, msgqHead);
    }

    // Single-packet case with no caller buffer: process the payload in place
    // from the scratch buffer, past the full MCTP header.
    NvU8 *pMessagePayload = (pPayloadBuffer == NULL) ? (pPacketBuffer + sizeof(MCTP_HEADER)) : pPayloadBuffer;

    status = kfspProcessNvdmMessage_HAL(pGpu, pKernelFsp, pMessagePayload, totalPayloadSize);

done:
    portMemFree(pPacketBuffer);
    return status;
}
486
487 /*!
488 * @brief Send one MCTP packet to FSP via EMEM
489 *
490 * @param[in] pGpu OBJGPU pointer
491 * @param[in] pKernelFsp KernelFsp pointer
492 * @param[in] pPacket MCTP packet
493 * @param[in] packetSize MCTP packet size in bytes
494 *
495 * @return NV_OK, or NV_ERR_INSUFFICIENT_RESOURCES
496 */
497 NV_STATUS
kfspSendPacket_IMPL(OBJGPU * pGpu,KernelFsp * pKernelFsp,NvU8 * pPacket,NvU32 packetSize)498 kfspSendPacket_IMPL
499 (
500 OBJGPU *pGpu,
501 KernelFsp *pKernelFsp,
502 NvU8 *pPacket,
503 NvU32 packetSize
504 )
505 {
506 NvU32 paddedSize;
507 NvU8 *pBuffer = NULL;
508 NV_STATUS status = NV_OK;
509
510 // Check that queue is ready to receive data
511 status = kfspPollForQueueEmpty(pGpu, pKernelFsp);
512 if (status != NV_OK)
513 {
514 return NV_ERR_INSUFFICIENT_RESOURCES;
515 }
516
517 // Pad to align size to 4-bytes boundary since EMEMC increments by DWORDS
518 paddedSize = NV_ALIGN_UP(packetSize, sizeof(NvU32));
519 pBuffer = portMemAllocNonPaged(paddedSize);
520 NV_CHECK_OR_RETURN(LEVEL_ERROR, pBuffer != NULL, NV_ERR_NO_MEMORY);
521 portMemSet(pBuffer, 0, paddedSize);
522 portMemCopy(pBuffer, paddedSize, pPacket, paddedSize);
523
524 kfspWriteToEmem_HAL(pGpu, pKernelFsp, pBuffer, paddedSize);
525
526 // Update HEAD and TAIL with new EMEM offset; RM always starts at offset 0.
527 kfspUpdateQueueHeadTail_HAL(pGpu, pKernelFsp, 0, paddedSize - sizeof(NvU32));
528
529 portMemFree(pBuffer);
530 return status;
531 }
532
533 /*!
534 * @brief Send a MCTP message to FSP via EMEM, and read response
535 *
536 *
537 * Response payload buffer is optional if response fits in a single packet.
538 *
539 * @param[in] pGpu OBJGPU pointer
540 * @param[in] pKernelFsp KernelFsp pointer
541 * @param[in] pPayload Pointer to message payload
542 * @param[in] size Message payload size
543 * @param[in] nvdmType NVDM type of message being sent
544 * @param[in] pResponsePayload Buffer in which to return response payload
545 * @param[in] responseBufferSize Response payload buffer size
546 *
547 * @return NV_OK, or NV_ERR_*
548 */
NV_STATUS
kfspSendAndReadMessage_IMPL
(
    OBJGPU *pGpu,
    KernelFsp *pKernelFsp,
    NvU8 *pPayload,
    NvU32 size,
    NvU32 nvdmType,
    NvU8 *pResponsePayload,
    NvU32 responseBufferSize
)
{
    NvU32 dataSent, dataRemaining;
    NvU32 packetPayloadCapacity;
    NvU32 curPayloadSize;
    NvU32 headerSize;
    NvU32 fspEmemRmChannelSize;
    NvBool bSinglePacket;
    NV_STATUS status;
    NvU8 *pBuffer = NULL;
    NvU8 seq = 0;
    NvU8 seid = 0;

    // Allocate buffer of same size as channel
    fspEmemRmChannelSize = kfspGetRmChannelSize_HAL(pGpu, pKernelFsp);
    pBuffer = portMemAllocNonPaged(fspEmemRmChannelSize);
    NV_CHECK_OR_RETURN(LEVEL_ERROR, pBuffer != NULL, NV_ERR_NO_MEMORY);
    portMemSet(pBuffer, 0, fspEmemRmChannelSize);

    //
    // Check if message will fit in single packet
    // We lose 2 DWORDS to MCTP and NVDM headers
    //
    headerSize = 2 * sizeof(NvU32);
    packetPayloadCapacity = fspEmemRmChannelSize - headerSize;
    bSinglePacket = (size <= packetPayloadCapacity);

    // First packet: carries both the MCTP transport header (SOM=1; EOM set
    // only if the whole message fits) and the NVDM message-type header.
    seid = kfspNvdmToSeid_HAL(pGpu, pKernelFsp, nvdmType);
    ((NvU32 *)pBuffer)[0] = kfspCreateMctpHeader_HAL(pGpu, pKernelFsp, 1, (NvU8)bSinglePacket, seid, seq); // SOM=1,EOM=?,SEID,SEQ=0
    ((NvU32 *)pBuffer)[1] = kfspCreateNvdmHeader_HAL(pGpu, pKernelFsp, nvdmType);

    curPayloadSize = NV_MIN(size, packetPayloadCapacity);
    portMemCopy(pBuffer + headerSize, packetPayloadCapacity, pPayload, curPayloadSize);

    // kfspSendPacket waits for FSP to drain the command queue before writing.
    status = kfspSendPacket(pGpu, pKernelFsp, pBuffer, curPayloadSize + headerSize);
    if (status != NV_OK)
    {
        goto failed;
    }

    if (!bSinglePacket)
    {
        // Multi packet case
        dataSent = curPayloadSize;
        dataRemaining = size - dataSent;
        headerSize = sizeof(NvU32); // No longer need NVDM header
        packetPayloadCapacity = fspEmemRmChannelSize - headerSize;

        while (dataRemaining > 0)
        {
            // EOM is set on the final packet only.
            NvBool bLastPacket = (dataRemaining <= packetPayloadCapacity);
            curPayloadSize = (bLastPacket) ? dataRemaining : packetPayloadCapacity;

            // Continuation packets: SOM=0, sequence number wraps modulo 4.
            portMemSet(pBuffer, 0, fspEmemRmChannelSize);
            ((NvU32 *)pBuffer)[0] = kfspCreateMctpHeader_HAL(pGpu, pKernelFsp, 0, (NvU8)bLastPacket, seid, (++seq) % 4);

            portMemCopy(pBuffer + headerSize, packetPayloadCapacity,
                        pPayload + dataSent, curPayloadSize);

            status = kfspSendPacket(pGpu, pKernelFsp, pBuffer, curPayloadSize + headerSize);
            if (status != NV_OK)
            {
                goto failed;
            }

            dataSent += curPayloadSize;
            dataRemaining -= curPayloadSize;
        }

    }

    // Wait for FSP's reply, then read and (optionally) return its payload.
    status = kfspPollForResponse(pGpu, pKernelFsp);
    if (status != NV_OK)
    {
        goto failed;
    }
    status = kfspReadMessage(pGpu, pKernelFsp, pResponsePayload, responseBufferSize);

failed:
    portMemFree(pBuffer);

    return status;
}
643