1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /*!
25  *
26  * @file    kern_fsp_gh100.c
 * @brief   Provides the implementation for the HOPPER chip-specific FSP HAL
 *          interfaces.
29  */
30 #include "gpu/fsp/kern_fsp.h"
31 #include "gpu/fsp/kern_fsp_retval.h"
32 #include "gpu/gsp/kernel_gsp.h"
33 
34 #include "published/hopper/gh100/dev_fsp_pri.h"
35 #include "fsp/fsp_nvdm_format.h"
36 #include "published/hopper/gh100/dev_gc6_island_addendum.h"
37 #include "published/hopper/gh100/dev_falcon_v4.h"
38 #include "published/hopper/gh100/dev_gsp.h"
39 #include "published/hopper/gh100/dev_therm.h"
40 #include "published/hopper/gh100/dev_therm_addendum.h"
41 #include "os/os.h"
42 #include "nvRmReg.h"
43 
44 #if RMCFG_MODULE_ENABLED (FSP)
45 #include "hopper/gh100/dev_gsp.h"
46 #include "gpu/gsp/gsp.h"
47 #include "gsp/memmap.h"
48 #include "objfsp.h"
49 #include "objflcnable.h"
50 #endif
51 
52 /*!
53  * @brief Update command queue head and tail pointers
54  *
55  * @param[in] pGpu       OBJGPU pointer
56  * @param[in] pKernelFsp KernelFsp pointer
57  * @param[in] queueHead  Offset to write to command queue head
58  * @param[in] queueTail  Offset to write to command queue tail
59  */
60 void
61 kfspUpdateQueueHeadTail_GH100
62 (
63     OBJGPU    *pGpu,
64     KernelFsp *pKernelFsp,
65     NvU32      queueHead,
66     NvU32      queueTail
67 )
68 {
69     // The write to HEAD needs to happen after TAIL because it will interrupt FSP
70     GPU_REG_WR32(pGpu, NV_PFSP_QUEUE_TAIL(FSP_EMEM_CHANNEL_RM), queueTail);
71     GPU_REG_WR32(pGpu, NV_PFSP_QUEUE_HEAD(FSP_EMEM_CHANNEL_RM), queueHead);
72 }
73 
74 /*!
75  * @brief Read command queue head and tail pointers
76  *
77  * @param[in]  pGpu       OBJGPU pointer
78  * @param[in]  pKernelFsp KernelFsp pointer
79  * @param[out] pQueueHead Pointer where we write command queue head
80  * @param[out] pQueueTail Pointer where we write command queue tail
81  */
82 void
83 kfspGetQueueHeadTail_GH100
84 (
85     OBJGPU    *pGpu,
86     KernelFsp *pKernelFsp,
87     NvU32     *pQueueHead,
88     NvU32     *pQueueTail
89 )
90 {
91     *pQueueHead = GPU_REG_RD32(pGpu, NV_PFSP_QUEUE_HEAD(FSP_EMEM_CHANNEL_RM));
92     *pQueueTail = GPU_REG_RD32(pGpu, NV_PFSP_QUEUE_TAIL(FSP_EMEM_CHANNEL_RM));
93 }
94 
95 /*!
96  * @brief Update message queue head and tail pointers
97  *
98  * @param[in] pGpu       OBJGPU pointer
99  * @param[in] pKernelFsp KernelFsp pointer
100  * @param[in] msgqHead   Offset to write to message queue head
101  * @param[in] msgqTail   Offset to write to message queue tail
102  */
103 void
104 kfspUpdateMsgQueueHeadTail_GH100
105 (
106     OBJGPU    *pGpu,
107     KernelFsp *pKernelFsp,
108     NvU32      msgqHead,
109     NvU32      msgqTail
110 )
111 {
112     GPU_REG_WR32(pGpu, NV_PFSP_MSGQ_TAIL(FSP_EMEM_CHANNEL_RM), msgqTail);
113     GPU_REG_WR32(pGpu, NV_PFSP_MSGQ_HEAD(FSP_EMEM_CHANNEL_RM), msgqHead);
114 }
115 
116 /*!
117  * @brief Read message queue head and tail pointers
118  *
119  * @param[in]  pGpu       OBJGPU pointer
120  * @param[in]  pKernelFsp KernelFsp pointer
121  * @param[out] pMsgqHead  Pointer where we write message queue head
122  * @param[out] pMsgqTail  Pointer where we write message queue tail
123  */
124 void
125 kfspGetMsgQueueHeadTail_GH100
126 (
127     OBJGPU    *pGpu,
128     KernelFsp *pKernelFsp,
129     NvU32     *pMsgqHead,
130     NvU32     *pMsgqTail
131 )
132 {
133     *pMsgqHead = GPU_REG_RD32(pGpu, NV_PFSP_MSGQ_HEAD(FSP_EMEM_CHANNEL_RM));
134     *pMsgqTail = GPU_REG_RD32(pGpu, NV_PFSP_MSGQ_TAIL(FSP_EMEM_CHANNEL_RM));
135 }
136 
137 /*!
138  * @brief Get size of RM's channel in FSP EMEM
139  *
140  * @param[in] pGpu       OBJGPU pointer
141  * @param[in] pKernelFsp KernelFsp pointer
142  *
143  * @return RM channel size in bytes
144  */
145 NvU32
146 kfspGetRmChannelSize_GH100
147 (
148     OBJGPU    *pGpu,
149     KernelFsp *pKernelFsp
150 )
151 {
152     //
    // Channel size is hardcoded to 1K for now. Later we will use EMEMR to
    // properly fetch the lower and upper bounds of the EMEM channel.
155     //
156     return FSP_EMEM_CHANNEL_RM_SIZE;
157 }
158 
159 /*!
 * @brief Retrieve SEID based on NVDM type
161  *
162  * For now, SEIDs are only needed for use-cases that send multi-packet RM->FSP
163  * messages. The SEID is used in these cases to route packets to the correct
164  * task as FSP receives them. Single-packet use-cases are given SEID 0.
165  *
166  * @param[in] pGpu       OBJGPU pointer
167  * @param[in] pKernelFsp KernelFsp pointer
168  * @param[in] nvdmType   NVDM message type
169  *
170  * @return SEID corresponding to passed-in NVDM type
171  */
172 NvU8
173 kfspNvdmToSeid_GH100
174 (
175     OBJGPU    *pGpu,
176     KernelFsp *pKernelFsp,
177     NvU8       nvdmType
178 )
179 {
180     NvU8 seid;
181 
182     switch (nvdmType)
183     {
184         case NVDM_TYPE_INFOROM:
185             seid = 1;
186             break;
187         case NVDM_TYPE_HULK:
188         default:
189             seid = 0;
190             break;
191     }
192 
193     return seid;
194 }
195 
196 /*!
197  * @brief Create MCTP header
198  *
199  * @param[in] pGpu       OBJGPU pointer
200  * @param[in] pKernelFsp KernelFsp pointer
201  * @param[in] som        Start of Message flag
202  * @param[in] eom        End of Message flag
 * @param[in] seid       SEID used to route the packet to the correct FSP task
204  * @param[in] seq        Packet sequence number
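 *
 * Example (illustrative): a single-packet message would typically be built
 * with som=1, eom=1, seid=0 (single-packet use-cases use SEID 0), and seq=0.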
205  *
206  * @return Constructed MCTP header
207  */
208 NvU32
209 kfspCreateMctpHeader_GH100
210 (
211     OBJGPU    *pGpu,
212     KernelFsp *pKernelFsp,
213     NvU8       som,
214     NvU8       eom,
215     NvU8       seid,
216     NvU8       seq
217 )
218 {
219     return REF_NUM(MCTP_HEADER_SOM,  (som)) |
220            REF_NUM(MCTP_HEADER_EOM,  (eom)) |
221            REF_NUM(MCTP_HEADER_SEID, (seid)) |
222            REF_NUM(MCTP_HEADER_SEQ,  (seq));
223 }
224 
225 /*!
226  * @brief Create NVDM payload header
227  *
228  * @param[in] pGpu       OBJGPU pointer
229  * @param[in] pKernelFsp KernelFsp pointer
230  * @param[in] nvdmType   NVDM type to include in header
231  *
232  * @return Constructed NVDM payload header
233  */
234 NvU32
235 kfspCreateNvdmHeader_GH100
236 (
237     OBJGPU    *pGpu,
238     KernelFsp *pKernelFsp,
239     NvU32      nvdmType
240 )
241 {
242     return REF_DEF(MCTP_MSG_HEADER_TYPE, _VENDOR_PCI) |
243            REF_DEF(MCTP_MSG_HEADER_VENDOR_ID, _NV)    |
244            REF_NUM(MCTP_MSG_HEADER_NVDM_TYPE, (nvdmType));
245 }
246 
247 /*!
248  * @brief Retrieve and validate info in packet's MCTP headers
249  *
250  * @param[in]  pGpu          OBJGPU pointer
251  * @param[in]  pKernelFsp    KernelFsp pointer
252  * @param[in]  pBuffer       Buffer containing packet
253  * @param[in]  size          Size of buffer in bytes
254  * @param[out] pPacketState  Pointer where we write packet state
255  * @param[out] pTag          Pointer where we write packet's MCTP tag
256  *
257  * @return NV_OK or NV_ERR_INVALID_DATA
258  */
259 NV_STATUS
260 kfspGetPacketInfo_GH100
261 (
262     OBJGPU            *pGpu,
263     KernelFsp         *pKernelFsp,
264     NvU8              *pBuffer,
265     NvU32              size,
266     MCTP_PACKET_STATE *pPacketState,
267     NvU8              *pTag
268 )
269 {
270     NvU32 mctpHeader;
271     NvU8  som, eom;
272     NV_STATUS status = NV_OK;
273 
274     mctpHeader = ((NvU32 *)pBuffer)[0];
275 
276     som = REF_VAL(MCTP_HEADER_SOM, mctpHeader);
277     eom = REF_VAL(MCTP_HEADER_EOM, mctpHeader);
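
    //
    // SOM/EOM encode the packet's position within a (possibly multi-packet)
    // MCTP message: SOM alone marks the first packet, EOM alone marks the
    // last, both set means a single-packet message, and neither set means an
    // intermediate packet.
    //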
278 
279     if ((som == 1) && (eom == 0))
280     {
281         *pPacketState = MCTP_PACKET_STATE_START;
282     }
283     else if ((som == 0) && (eom == 1))
284     {
285         *pPacketState = MCTP_PACKET_STATE_END;
286     }
287     else if ((som == 1) && (eom == 1))
288     {
289         *pPacketState = MCTP_PACKET_STATE_SINGLE_PACKET;
290     }
291     else
292     {
293         *pPacketState = MCTP_PACKET_STATE_INTERMEDIATE;
294     }
295 
296     if ((*pPacketState == MCTP_PACKET_STATE_START) ||
297         (*pPacketState == MCTP_PACKET_STATE_SINGLE_PACKET))
298     {
299         // Packet contains payload header, check it
300         status = kfspValidateMctpPayloadHeader_HAL(pGpu, pKernelFsp, pBuffer, size);
301     }
302 
303     *pTag = REF_VAL(MCTP_HEADER_TAG, mctpHeader);
304 
305     return status;
306 }
307 
308 /*!
309  * @brief Validate packet's MCTP payload header
310  *
311  * @param[in] pGpu       OBJGPU pointer
312  * @param[in] pKernelFsp KernelFsp pointer
313  * @param[in] pBuffer    Buffer containing packet
314  * @param[in] size       Size of buffer in bytes
315  *
316  * @return NV_OK or NV_ERR_INVALID_DATA
317  */
318 NV_STATUS
319 kfspValidateMctpPayloadHeader_GH100
320 (
321     OBJGPU    *pGpu,
322     KernelFsp *pKernelFsp,
323     NvU8      *pBuffer,
    NvU32      size
325 )
326 {
327     NvU32 mctpPayloadHeader;
328     NvU16 mctpVendorId;
329     NvU8  mctpMessageType;
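
    // The MCTP payload header is the second DWORD of the packet, immediately
    // following the MCTP transport header read in kfspGetPacketInfo.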
330 
331     mctpPayloadHeader = ((NvU32 *)pBuffer)[1];
332 
333     mctpMessageType = REF_VAL(MCTP_MSG_HEADER_TYPE, mctpPayloadHeader);
334     if (mctpMessageType != MCTP_MSG_HEADER_TYPE_VENDOR_PCI)
335     {
336         NV_PRINTF(LEVEL_ERROR, "Invalid MCTP Message type 0x%0x, expecting 0x7e (Vendor Defined PCI)\n",
337                   mctpMessageType);
338         return NV_ERR_INVALID_DATA;
339     }
340 
341     mctpVendorId = REF_VAL(MCTP_MSG_HEADER_VENDOR_ID, mctpPayloadHeader);
342     if (mctpVendorId != MCTP_MSG_HEADER_VENDOR_ID_NV)
343     {
344         NV_PRINTF(LEVEL_ERROR, "Invalid PCI Vendor Id 0x%0x, expecting 0x10de (Nvidia)\n",
345                   mctpVendorId);
346         return NV_ERR_INVALID_DATA;
347     }
348 
349     if (size < (sizeof(MCTP_HEADER) + sizeof(NvU8)))
350     {
351         NV_PRINTF(LEVEL_ERROR, "Packet doesn't contain NVDM type in payload header\n");
352         return NV_ERR_INVALID_DATA;
353     }
354 
355     return NV_OK;
356 }
357 
358 /*!
359  * @brief Process NVDM payload
360  *
361  * @param[in] pGpu       OBJGPU pointer
362  * @param[in] pKernelFsp KernelFsp pointer
363  * @param[in] pBuffer    Buffer containing packet data
 * @param[in] size       Buffer size in bytes
365  *
366  * @return NV_OK or NV_ERR_NOT_SUPPORTED
367  */
368 NV_STATUS
369 kfspProcessNvdmMessage_GH100
370 (
371     OBJGPU    *pGpu,
372     KernelFsp *pKernelFsp,
373     NvU8      *pBuffer,
374     NvU32      size
375 )
376 {
377     NvU8 nvdmType;
378     NV_STATUS status = NV_OK;
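
    // The first byte of the NVDM payload is the NVDM type.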
379 
380     nvdmType = pBuffer[0];
381 
382     switch (nvdmType)
383     {
384         case NVDM_TYPE_FSP_RESPONSE:
385         case NVDM_TYPE_SMBPBI:
386             status = kfspProcessCommandResponse_HAL(pGpu, pKernelFsp, pBuffer, size);
387             break;
388         default:
389             NV_PRINTF(LEVEL_ERROR, "Unknown or unsupported NVDM type received: 0x%0x\n",
390                       nvdmType);
391             status = NV_ERR_NOT_SUPPORTED;
392             break;
393     }
394 
395     return status;
396 }
397 
398 /*!
399  * @brief Process FSP command response
400  *
401  * @param[in] pGpu       OBJGPU pointer
402  * @param[in] pKernelFsp KernelFsp pointer
403  * @param[in] pBuffer    Buffer containing packet data
 * @param[in] size       Buffer size in bytes
405  *
406  * @return NV_OK or NV_ERR_INVALID_DATA
407  */
408 NV_STATUS
409 kfspProcessCommandResponse_GH100
410 (
411     OBJGPU    *pGpu,
412     KernelFsp *pKernelFsp,
413     NvU8      *pBuffer,
414     NvU32      size
415 )
416 {
417     NVDM_PAYLOAD_COMMAND_RESPONSE *pCmdResponse;
418     NvU32 headerSize = sizeof(NvU8); // NVDM type
419     NV_STATUS status = NV_OK;
420 
421     if (size < (headerSize + sizeof(NVDM_PAYLOAD_COMMAND_RESPONSE)))
422     {
423         NV_PRINTF(LEVEL_ERROR, "Expected FSP command response, but packet is not big enough for payload. Size: 0x%0x\n", size);
424         return NV_ERR_INVALID_DATA;
425     }
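
    // The command response structure immediately follows the single-byte NVDM type.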
426 
427     pCmdResponse = (NVDM_PAYLOAD_COMMAND_RESPONSE *)&(pBuffer[1]);
428 
429     NV_PRINTF(LEVEL_INFO, "Received FSP command response. Task ID: 0x%0x Command type: 0x%0x Error code: 0x%0x\n",
430               pCmdResponse->taskId, pCmdResponse->commandNvdmType, pCmdResponse->errorCode);
431 
432     status = kfspErrorCode2NvStatusMap_HAL(pGpu, pKernelFsp, pCmdResponse->errorCode);
433     if (status == NV_OK)
434     {
435         NV_PRINTF(LEVEL_INFO, "Last command was processed by FSP successfully!\n");
436     }
437     else if (status != NV_ERR_OBJECT_NOT_FOUND)
438     {
440         NV_PRINTF(LEVEL_ERROR, "FSP response reported error. Task ID: 0x%0x Command type: 0x%0x Error code: 0x%0x\n",
441                 pCmdResponse->taskId, pCmdResponse->commandNvdmType, pCmdResponse->errorCode);
442     }
443 
444     return status;
445 }
446 
447 /*!
448  * @brief Configure EMEMC for RM's queue in FSP EMEM
449  *
450  * @param[in] pGpu       OBJGPU pointer
451  * @param[in] pKernelFsp KernelFsp pointer
452  * @param[in] offset     Offset to write to EMEMC in DWORDS
453  * @param[in] bAincw     Flag to set auto-increment on writes
454  * @param[in] bAincr     Flag to set auto-increment on reads
455  *
456  * @return NV_OK
457  */
458 NV_STATUS
459 kfspConfigEmemc_GH100
460 (
461     OBJGPU    *pGpu,
462     KernelFsp *pKernelFsp,
463     NvU32      offset,
464     NvBool     bAincw,
465     NvBool     bAincr
466 )
467 {
468     NvU32 offsetBlks, offsetDwords;
469     NvU32 reg32 = 0;
470 
471     //
472     // EMEMC offset is encoded in terms of blocks (64 DWORDS) and DWORD offset
473     // within a block, so calculate each.
474     //
475     offsetBlks = offset / 64;
476     offsetDwords = offset % 64;
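
    //
    // For example, a DWORD offset of 70 decodes to _BLK = 1 and _OFFS = 6
    // (70 = 1 * 64 + 6).
    //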
477 
478     reg32 = FLD_SET_DRF_NUM(_PFSP, _EMEMC, _OFFS, offsetDwords, reg32);
479     reg32 = FLD_SET_DRF_NUM(_PFSP, _EMEMC, _BLK, offsetBlks, reg32);
480 
481     if (bAincw)
482     {
483         reg32 = FLD_SET_DRF(_PFSP, _EMEMC, _AINCW, _TRUE, reg32);
484     }
485     if (bAincr)
486     {
487         reg32 = FLD_SET_DRF(_PFSP, _EMEMC, _AINCR, _TRUE, reg32);
488     }
489 
490     GPU_REG_WR32(pGpu, NV_PFSP_EMEMC(FSP_EMEM_CHANNEL_RM), reg32);
491     return NV_OK;
492 }
493 
494 /*!
495  * @brief Write data in buffer to RM channel in FSP's EMEM
496  *
497  * @param[in] pGpu       OBJGPU pointer
498  * @param[in] pKernelFsp KernelFsp pointer
499  * @param[in] pBuffer    Buffer with data to write to EMEM
 * @param[in] size       Size of buffer in bytes, assumed DWORD aligned
501  *
502  * @return NV_OK
503  */
504 NV_STATUS
505 kfspWriteToEmem_GH100
506 (
507     OBJGPU    *pGpu,
508     KernelFsp *pKernelFsp,
509     NvU8      *pBuffer,
510     NvU32      size
511 )
512 {
513     NvU32 i, reg32;
514     NvU32 ememOffsetEnd;
515 
516     //
    // First configure EMEMC. RM always writes 0 to the offset, which is OK
    // because RM's channel starts at 0 on GH100 and we always start from the
    // beginning for each packet. This should later be improved to use EMEMR to
    // properly fetch the lower and upper bounds of the EMEM channel.
521     //
522     kfspConfigEmemc_HAL(pGpu, pKernelFsp, 0, NV_TRUE, NV_FALSE);
523 
524     NV_PRINTF(LEVEL_INFO, "About to send data to FSP, ememcOff=0, size=0x%x\n", size);
525     if (!NV_IS_ALIGNED(size, sizeof(NvU32)))
526     {
527         NV_PRINTF(LEVEL_WARNING, "Size=0x%x is not DWORD-aligned, data will be truncated!\n", size);
528     }
529 
530     // Now write to EMEMD
531     for (i = 0; i < (size / 4); i++)
532     {
533         GPU_REG_WR32(pGpu, NV_PFSP_EMEMD(FSP_EMEM_CHANNEL_RM), ((NvU32*)(void*)pBuffer)[i]);
534     }
535 
536     // Sanity check offset. If this fails, the autoincrement did not work
537     reg32 = GPU_REG_RD32(pGpu, NV_PFSP_EMEMC(FSP_EMEM_CHANNEL_RM));
538     ememOffsetEnd = DRF_VAL(_PFSP, _EMEMC, _OFFS, reg32);
539 
540     // Blocks are 64 DWORDS
541     ememOffsetEnd += DRF_VAL(_PFSP, _EMEMC, _BLK, reg32) * 64;
542     NV_PRINTF(LEVEL_INFO, "After sending data, ememcOff = 0x%x\n", ememOffsetEnd);
543 
544     NV_ASSERT((ememOffsetEnd) == (size / sizeof(NvU32)));
545     return NV_OK;
546 }
547 
548 /*!
549  * @brief Read data to buffer from RM channel in FSP's EMEM
550  *
 * @param[in]  pGpu       OBJGPU pointer
 * @param[in]  pKernelFsp KernelFsp pointer
 * @param[out] pBuffer    Buffer where we copy data from EMEM
 * @param[in]  size       Size to read in bytes, assumed DWORD aligned
555  *
556  * @return NV_OK
557  */
558 NV_STATUS
559 kfspReadFromEmem_GH100
560 (
561     OBJGPU    *pGpu,
562     KernelFsp *pKernelFsp,
563     NvU8      *pBuffer,
564     NvU32      size
565 )
566 {
567     NvU32 i, reg32;
568     NvU32 ememOffsetEnd;
569 
570     //
    // First configure EMEMC. RM always writes 0 to the offset, which is OK
    // because RM's channel starts at 0 on GH100 and we always start from the
    // beginning for each packet. This should later be improved to use EMEMR to
    // properly fetch the lower and upper bounds of the EMEM channel.
575     //
576     kfspConfigEmemc_HAL(pGpu, pKernelFsp, 0, NV_FALSE, NV_TRUE);
577 
578     NV_PRINTF(LEVEL_INFO, "About to read data from FSP, ememcOff=0, size=0x%x\n", size);
579     if (!NV_IS_ALIGNED(size, sizeof(NvU32)))
580     {
581         NV_PRINTF(LEVEL_WARNING, "Size=0x%x is not DWORD-aligned, data will be truncated!\n", size);
582     }
583 
584     // Now read from EMEMD
585     for (i = 0; i < (size / 4); i++)
586     {
587         ((NvU32*)(void*)pBuffer)[i] = GPU_REG_RD32(pGpu, NV_PFSP_EMEMD(FSP_EMEM_CHANNEL_RM));
588     }
589 
590     // Sanity check offset. If this fails, the autoincrement did not work
591     reg32 = GPU_REG_RD32(pGpu, NV_PFSP_EMEMC(FSP_EMEM_CHANNEL_RM));
592     ememOffsetEnd = DRF_VAL(_PFSP, _EMEMC, _OFFS, reg32);
593 
594     // Blocks are 64 DWORDS
595     ememOffsetEnd += DRF_VAL(_PFSP, _EMEMC, _BLK, reg32) * 64;
596     NV_PRINTF(LEVEL_INFO, "After reading data, ememcOff = 0x%x\n", ememOffsetEnd);
597 
598     NV_ASSERT((ememOffsetEnd) == (size / sizeof(NvU32)));
599     return NV_OK;
600 }
601 
602 NV_STATUS
603 kfspWaitForSecureBoot_GH100
604 (
605     OBJGPU    *pGpu,
606     KernelFsp *pKernelFsp
607 )
608 {
609     NV_STATUS status  = NV_OK;
610     RMTIMEOUT timeout;
611 
612     //
    // Poll for FSP boot completion.
    // On Hopper, the boot FSM triggers FSP execution out of chip reset, and
    // FSP writes 0xFF to the NV_THERM_I2CS_SCRATCH register when boot completes.
    // FBFalcon training during devinit alone takes 2 seconds, up to 3 on HBM3,
    // but the default threadstate timeout on Windows is 1800 ms, so increase
    // this wait to 4 seconds to match MODS GetGFWBootTimeoutMs.
    // For flags, this must not time out due to the aforementioned threadstate
    // timeout, and we must not use the GPU TMR since it is inaccessible.
621     //
622     gpuSetTimeout(pGpu, NV_MAX(gpuScaleTimeout(pGpu, 4000000), pGpu->timeoutData.defaultus),
623                   &timeout, GPU_TIMEOUT_FLAGS_OSTIMER | GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE);
624 
625     while(!GPU_FLD_TEST_DRF_DEF(pGpu, _THERM_I2CS_SCRATCH, _FSP_BOOT_COMPLETE, _STATUS, _SUCCESS))
626     {
627         status = gpuCheckTimeout(pGpu, &timeout);
628         if (status == NV_ERR_TIMEOUT)
629         {
630             NV_PRINTF(LEVEL_ERROR,
                      "Timeout while polling for FSP boot complete, I2CS_SCRATCH: %x\n",
632                       GPU_REG_RD32(pGpu, NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE));
633             break;
634         }
635     }
636 
637     return status;
638 }
639 
640 /*!
641  * @brief Check if GSP-FMC Inst_in_sys ucode needs to be booted.
642  *
643  * @param[in]  pGpu          OBJGPU pointer
644  * @param[in]  pKernelFsp    KernelFsp pointer
645  *
646  * @return NV_TRUE if GSP Inst_in_sys FMC needs to be booted, or NV_FALSE otherwise
647  */
648 NvBool
649 kfspCheckGspSecureScratch_GH100
650 (
651     OBJGPU    *pGpu,
652     KernelFsp *pKernelFsp
653 )
654 {
655 
656     return NV_FALSE;
657 }
658 
659 static const BINDATA_ARCHIVE *
660 kfspGetGspUcodeArchive
661 (
662     OBJGPU    *pGpu,
663     KernelFsp *pKernelFsp
664 )
665 {
666     KernelGsp *pKernelGsp                 = GPU_GET_KERNEL_GSP(pGpu);
667 
668     if (pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_GSP_MODE_GSPRM))
669     {
670         NV_PRINTF(LEVEL_ERROR, "Loading GSP-RM image using FSP.\n");
671 
672         if (kgspIsDebugModeEnabled_HAL(pGpu, pKernelGsp))
        {
            return kgspGetBinArchiveGspRmFmcGfwDebugSigned_HAL(pKernelGsp);
        }
678         else
679         {
680             return kgspGetBinArchiveGspRmFmcGfwProdSigned_HAL(pKernelGsp);
681         }
682     }
683 #if RMCFG_MODULE_ENABLED (GSP)
684     else
685     {
686         Gsp *pGsp = GPU_GET_GSP(pGpu);
687 
688         // Intentional error print so that we know which mode RM is loaded with
689         NV_PRINTF(LEVEL_ERROR, "Loading GSP image for monolithic RM using FSP.\n");
690         if (gspIsDebugModeEnabled_HAL(pGpu, pGsp))
691         {
692             if (kfspCheckGspSecureScratch_HAL(pGpu, pKernelFsp))
693             {
694                 return gspGetBinArchiveGspFmcInstInSysGfwDebugSigned_HAL(pGsp);
695             }
696             else
697             {
698                 //
                // The non-resident (GspCcGfw) image contains just the FMC.
                // When GSP-RM is not enabled, we need to load the GSP RM proxy:
                // prepare the GSP-proxy image in SYSMEM and pass it to the FMC,
                // which then boots the RM proxy.
703                 //
704                 NV_ASSERT_OR_RETURN(gspSetupRMProxyImage(pGpu, pGsp) == NV_OK, NULL);
705 
                return gspGetBinArchiveGspFmcGfwDebugSigned_HAL(pGsp);
709             }
710         }
711         else
712         {
713             if (kfspCheckGspSecureScratch_HAL(pGpu, pKernelFsp))
714             {
715                 return gspGetBinArchiveGspFmcInstInSysGfwProdSigned_HAL(pGsp);
716             }
717             else
718             {
719                 NV_ASSERT_OR_RETURN(gspSetupRMProxyImage(pGpu, pGsp) == NV_OK, NULL);
                return gspGetBinArchiveGspFmcGfwProdSigned_HAL(pGsp);
723             }
724         }
725     }
726 #endif
727 
728     //
    // It does not make sense to boot monolithic RM when the physical FSP
    // module does not exist.
731     //
732     return NULL;
733 }
734 
735 static NV_STATUS
736 kfspGetGspBootArgs
737 (
738     OBJGPU     *pGpu,
739     KernelFsp  *pKernelFsp,
740     RmPhysAddr *pBootArgsGspSysmemOffset
741 )
742 {
743     NV_STATUS status         = NV_OK;
744 
745     if (pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_GSP_MODE_GSPRM))
746     {
747         KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu);
748 
749         // Ensure bootArgs have been set up before
750         NV_ASSERT_OR_RETURN(pKernelGsp->pGspFmcArgumentsCached != NULL, NV_ERR_INVALID_STATE);
751         NV_ASSERT_OR_RETURN(memdescGetAddressSpace(pKernelGsp->pGspFmcArgumentsDescriptor) == ADDR_SYSMEM, NV_ERR_INVALID_STATE);
752         *pBootArgsGspSysmemOffset = memdescGetPhysAddr(pKernelGsp->pGspFmcArgumentsDescriptor, AT_GPU, 0);
753     }
754 
755     return status;
756 }
757 
758 
759 /*!
760  * @brief Set up GSP-FMC and boot args for FSP command
761  *
762  * @param[in] pGpu       OBJGPU pointer
763  * @param[in] pKernelFsp KernelFsp pointer
764  *
765  * @return NV_OK, or error if failed
766  */
767 static NV_STATUS
768 kfspSetupGspImages
769 (
770     OBJGPU           *pGpu,
771     KernelFsp        *pKernelFsp,
772     NVDM_PAYLOAD_COT *pCotPayload
773 )
774 {
775     NV_STATUS status = NV_OK;
776 
777     const BINDATA_ARCHIVE *pBinArchive;
778     PBINDATA_STORAGE pGspImage;
779     PBINDATA_STORAGE pGspImageHash;
780     PBINDATA_STORAGE pGspImageSignature;
781     PBINDATA_STORAGE pGspImagePublicKey;
782     NvU32 pGspImageSize;
783     NvU32 pGspImageMapSize;
784     NvP64 pVaKernel = NULL;
785     NvP64 pPrivKernel = NULL;
786     NvU64 flags = MEMDESC_FLAGS_NONE;
787 
788     //
    // On systems with SEV enabled, the GSP-FMC image has to be accessible
    // to FSP (a unit inside the GPU) and hence placed in unprotected sysmem.
791     //
792 
793     // Detect the mode of operation for GSP and fetch the right image to boot
794     pBinArchive = kfspGetGspUcodeArchive(pGpu, pKernelFsp);
795     if (pBinArchive == NULL)
796     {
797         NV_PRINTF(LEVEL_ERROR, "Cannot find correct ucode archive for booting!\n");
798         status = NV_ERR_OBJECT_NOT_FOUND;
799         goto failed;
800     }
801 
802     // Set up the structures to send GSP-FMC
803     pGspImage = (PBINDATA_STORAGE)bindataArchiveGetStorage(pBinArchive, "ucode_image");
804     pGspImageHash = (PBINDATA_STORAGE)bindataArchiveGetStorage(pBinArchive, "ucode_hash");
805     pGspImageSignature = (PBINDATA_STORAGE)bindataArchiveGetStorage(pBinArchive, "ucode_sig");
806     pGspImagePublicKey = (PBINDATA_STORAGE)bindataArchiveGetStorage(pBinArchive, "ucode_pkey");
807 
808     if ((pGspImage == NULL) || (pGspImageHash == NULL) ||
809         (pGspImageSignature == NULL) || (pGspImagePublicKey == NULL))
810     {
811         status = NV_ERR_NOT_SUPPORTED;
812         goto failed;
813     }
814 
815     pGspImageSize = bindataGetBufferSize(pGspImage);
816     pGspImageMapSize = NV_ALIGN_UP(pGspImageSize, 0x1000);
817 
818     status = memdescCreate(&pKernelFsp->pGspFmcMemdesc, pGpu, pGspImageMapSize,
819                            0, NV_TRUE, ADDR_SYSMEM, NV_MEMORY_UNCACHED, flags);
820     NV_ASSERT_OR_GOTO(status == NV_OK, failed);
821 
822     status = memdescAlloc(pKernelFsp->pGspFmcMemdesc);
823     NV_ASSERT_OR_GOTO(status == NV_OK, failed);
824 
825     status = memdescMap(pKernelFsp->pGspFmcMemdesc, 0, pGspImageMapSize, NV_TRUE,
826                         NV_PROTECT_READ_WRITE, &pVaKernel, &pPrivKernel);
827     NV_ASSERT_OR_GOTO(status == NV_OK, failed);
828 
829     portMemSet(pVaKernel, 0, pGspImageMapSize);
830 
831     status = bindataWriteToBuffer(pGspImage, pVaKernel, pGspImageSize);
832     NV_ASSERT_OR_GOTO(status == NV_OK, failed);
833 
834     // Clean up CPU side resources since they are not needed anymore
835     memdescUnmap(pKernelFsp->pGspFmcMemdesc, NV_TRUE, 0, pVaKernel, pPrivKernel);
836 
837     pCotPayload->gspFmcSysmemOffset = memdescGetPhysAddr(pKernelFsp->pGspFmcMemdesc, AT_GPU, 0);
838 
839     status = bindataWriteToBuffer(pGspImageHash, (NvU8*)pCotPayload->hash384, sizeof(pCotPayload->hash384));
840     NV_ASSERT_OR_GOTO(status == NV_OK, failed);
841 
842     status = bindataWriteToBuffer(pGspImageSignature, (NvU8*)pCotPayload->signature, sizeof(pCotPayload->signature));
843     NV_ASSERT_OR_GOTO(status == NV_OK, failed);
844 
845     status = bindataWriteToBuffer(pGspImagePublicKey, (NvU8*)pCotPayload->publicKey, sizeof(pCotPayload->publicKey));
846     NV_ASSERT_OR_GOTO(status == NV_OK, failed);
847 
848     // Set up boot args based on the mode of operation
849     status = kfspGetGspBootArgs(pGpu, pKernelFsp, &pCotPayload->gspBootArgsSysmemOffset);
850     NV_ASSERT_OR_GOTO(status == NV_OK, failed);
851 
852     return NV_OK;
853 
854 failed:
855     memdescDestroy(pKernelFsp->pGspFmcMemdesc);
856     pKernelFsp->pGspFmcMemdesc = NULL;
857 
858     return status;
859 }
860 
861 /*!
 * Determine if the PRIV target mask is unlocked for GSP and the BAR0 decoupler allows GSP access.
 *
 * This is a temporary WAR for the PRIV target mask bug 3640831 until we have a notification
 * protocol in place (there is no HW mechanism for the CPU to check whether GSP is open other
 * than reading the 0xBADF41YY code).
 *
 * Until the programmed BAR0 decoupler settings are cleared, GSP access is blocked from
 * the CPU, so all reads will return 0.
870  */
871 static NvBool
872 _kfspIsGspTargetMaskReleased
873 (
874     OBJGPU  *pGpu,
875     void    *pVoid
876 )
877 {
878     const NvU32   privErrTargetLocked      = 0xBADF4100U;
879     const NvU32   privErrTargetLockedMask  = 0xFFFFFF00U; // Ignore LSB - it has extra error information
880     NvU32 reg;
881 
882     //
883     // This register is read with the raw OS read to avoid the 0xbadf sanity checking
884     // done by the usual register read utilities.
885     //
886     reg = osDevReadReg032(pGpu, gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0),
887                           DRF_BASE(NV_PGSP) + NV_PFALCON_FALCON_HWCFG2);
888 
889     return ((reg != 0) && ((reg & privErrTargetLockedMask) != privErrTargetLocked));
890 }
891 
892 /*!
893  * Determine if GSP's target mask is released.
894  */
895 NV_STATUS
896 kfspWaitForGspTargetMaskReleased_GH100
897 (
898     OBJGPU    *pGpu,
899     KernelFsp *pKernelFsp
900 )
901 {
902     NV_STATUS status = NV_OK;
903 
904     status =  gpuTimeoutCondWait(pGpu, _kfspIsGspTargetMaskReleased, NULL, NULL);
905 
906     return status;
907 }
908 
909 static NV_STATUS
910 _kfspCheckGspBootStatus
911 (
912     OBJGPU    *pGpu,
913     KernelFsp *pKernelFsp
914 )
915 {
916 #if RMCFG_MODULE_ENABLED (FSP)
917     NV_STATUS status = NV_OK;
918     RMTIMEOUT timeout;
919     Gsp *pGsp = GPU_GET_GSP(pGpu);
920     Falcon *pFlcn = ENG_GET_FLCN(pGsp);
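
    // GSP has already been bootstrapped by FSP at this point, so mark the
    // falcon as bootstrapped rather than booting it from RM.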
921 
922     pFlcn->bBootstrapped = NV_TRUE;
923 
    // In Inst_in_sys mode, the GSP-FMC writes its boot status to NV_PGSP_MAILBOX(0).
925     if (kfspCheckGspSecureScratch_HAL(pGpu, pKernelFsp))
926     {
927         gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, GPU_TIMEOUT_FLAGS_OSTIMER | GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE);
        while (FLD_TEST_DRF_NUM(_PGSP, _MAILBOX, _DATA, GSP_INST_IN_SYS_COMPLETION_STATUS_IN_PROGRESS, GPU_REG_RD32(pGpu, NV_PGSP_MAILBOX(0))))
929         {
930             status = gpuCheckTimeout(pGpu, &timeout);
931             if (status == NV_ERR_TIMEOUT)
932             {
933                 NV_PRINTF(LEVEL_ERROR, "Timed out waiting for GSP Inst_in_sys ucode to boot\n");
934                 DBG_BREAKPOINT();
935                 break;
936             }
937             osSpinLoop();
        }

        // Read the GSP mailbox to check whether it booted successfully.
        if (GPU_REG_RD32(pGpu, NV_PGSP_MAILBOX(0)) != GSP_INST_IN_SYS_COMPLETION_STATUS_OK)
        {
            NV_PRINTF(LEVEL_ERROR, "GSP Inst_in_sys ucode boot failed with GSP status 0x%x\n", GPU_REG_RD32(pGpu, NV_PGSP_MAILBOX(0)));
            DBG_BREAKPOINT();
        }
        return status;
    }
947 
    // Ensure that, for GH100+, the target mask (TM) is released before polling for priv lockdown release
949     status = kfspWaitForGspTargetMaskReleased_HAL(pGpu, pKernelFsp);
950     if (status != NV_OK)
951     {
952         NV_PRINTF(LEVEL_ERROR, "Timed out waiting for GSP's target mask to be released.\n");
953         DBG_BREAKPOINT();
954         return status;
955     }
956 
957     // Ensure priv lockdown is released before polling interrupts
958     gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, GPU_TIMEOUT_FLAGS_OSTIMER | GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE);
959     do
960     {
961         if (flcnIsRiscvLockdownReleased_HAL(pGpu, pFlcn))
962         {
963             status = NV_OK;
964             break;
965         }
966 
967         status = gpuCheckTimeout(pGpu, &timeout);
968         if (status == NV_ERR_TIMEOUT)
969         {
970             NV_PRINTF(LEVEL_ERROR, "Timed out waiting for priv lockdown release.\n");
971             DBG_BREAKPOINT();
972             break;
973         }
974         osSpinLoop();
975     } while (1);
976 
977     // Ensure GSP can send back init ack interrupt to CPU
978     do
979     {
980         gspServiceEarlyInterrupt_HAL(pGpu, pGsp);
981 
982         if (pFlcn->bOSReady)
983         {
984             status = NV_OK;
985             break;
986         }
987 
988         status = gpuCheckTimeout(pGpu, &timeout);
989         if (status == NV_ERR_TIMEOUT)
990         {
991             NV_PRINTF(LEVEL_ERROR, "Timed out waiting for GSP ucode to boot.\n");
992             DBG_BREAKPOINT();
993             break;
994         }
995         osSpinLoop();
996     } while (1);
997 
998     return status;
999 #else
1000     // On GSP-RM, the kgsp code path will check for GSP boot status
1001     return NV_OK;
1002 #endif
1003 }
1004 
1005 /*!
1006  * @brief Dump debug registers for FSP
1007  *
1008  * @param[in] pGpu       OBJGPU pointer
1009  * @param[in] pKernelFsp KernelFsp pointer
1012  */
1013 void
1014 kfspDumpDebugState_GH100
1015 (
1016     OBJGPU    *pGpu,
1017     KernelFsp *pKernelFsp
1018 )
1019 {
1020     NV_PRINTF(LEVEL_ERROR, "NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(0) = 0x%x\n",
1021               GPU_REG_RD32(pGpu, NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(0)));
1022     NV_PRINTF(LEVEL_ERROR, "NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(1) = 0x%x\n",
1023               GPU_REG_RD32(pGpu, NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(1)));
1024     NV_PRINTF(LEVEL_ERROR, "NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(2) = 0x%x\n",
1025               GPU_REG_RD32(pGpu, NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(2)));
1026     NV_PRINTF(LEVEL_ERROR, "NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(3) = 0x%x\n",
1027               GPU_REG_RD32(pGpu, NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(3)));
1028 }
1029 
1030 /*!
 * @brief Checks whether GSP-FMC is enforced on this chip
1032  *
1033  * @param[in] pGpu       OBJGPU pointer
1034  * @param[in] pKernelFsp KernelFsp pointer
1035  *
 * @return NV_TRUE if GSP-FMC is enforced, NV_FALSE otherwise.
1037  */
1038 NvBool
1039 kfspGspFmcIsEnforced_GH100
1040 (
1041     OBJGPU    *pGpu,
1042     KernelFsp *pKernelFsp
1043 )
1044 {
1045     return NV_TRUE;
1046 }
1047 
1048 /*!
1049  * @brief Send GSP-FMC and FRTS info to FSP
1050  *
1051  * @param[in] pGpu       OBJGPU pointer
1052  * @param[in] pKernelFsp KernelFsp pointer
1053  *
1054  * @return NV_OK, or error if failed
1055  */
1056 NV_STATUS
1057 kfspSendBootCommands_GH100
1058 (
1059     OBJGPU    *pGpu,
1060     KernelFsp *pKernelFsp
1061 )
1062 {
1063     NV_STATUS status = NV_OK;
1064     NV_STATUS statusBoot = NV_OK;
1065     NvU32 frtsSize = 0;
1066     NVDM_PAYLOAD_COT *pCotPayload = NULL;
1067     NvP64 pVaKernel = NULL;
1068     NvP64 pPrivKernel = NULL;
1069 
1070     if (!IS_EMULATION(pGpu) && !IS_SILICON(pGpu))
1071     {
1072         //
        // The FSP management partition is only enabled when secure boot is
        // enabled, on silicon and certain emulation configs.
1075         //
1076         return NV_OK;
1077     }
1078 
1079     if (pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_IS_MISSING))
1080     {
1081         if (IS_SILICON(pGpu))
1082         {
1083             NV_PRINTF(LEVEL_ERROR, "RM cannot boot with FSP missing on silicon.\n");
1084             return NV_ERR_NOT_SUPPORTED;
1085         }
1086 
1087         NV_PRINTF(LEVEL_WARNING, "Secure boot is disabled due to missing FSP.\n");
1088         return NV_OK;
1089     }
1090 
1091     if (pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_BOOT_COMMAND_OK))
1092     {
1093         NV_PRINTF(LEVEL_ERROR, "Cannot send FSP boot commands multiple times.\n");
1094         return NV_ERR_NOT_SUPPORTED;
1095     }
1096 
1097     // Confirm FSP secure boot partition is done
1098     statusBoot = kfspWaitForSecureBoot_HAL(pGpu, pKernelFsp);
1099 
1100     if (statusBoot != NV_OK)
1101     {
1102         NV_PRINTF(LEVEL_ERROR, "FSP secure boot partition timed out.\n");
1103         return statusBoot;
1104     }
1105 
    // Enforce that the GSP-FMC can only be booted by FSP on silicon.
1107     if (IS_SILICON(pGpu) &&
1108         kfspGspFmcIsEnforced_HAL(pGpu, pKernelFsp) &&
1109         pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_GSPFMC))
1110     {
1111         NV_PRINTF(LEVEL_ERROR, "Chain-of-trust (GSP-FMC) cannot be disabled on silicon.\n");
1112         return NV_ERR_NOT_SUPPORTED;
1113     }
1114 
1115     if (pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_FRTS_SYSMEM) &&
1116         pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_FRTS_VIDMEM) &&
1117         pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_GSPFMC))
1118     {
1119         NV_PRINTF(LEVEL_WARNING, "Chain-of-trust is disabled via regkey\n");
1120         pKernelFsp->setProperty(pKernelFsp, PDB_PROP_KFSP_BOOT_COMMAND_OK, NV_TRUE);
1121         return NV_OK;
1122     }
1123 
1124     pCotPayload = portMemAllocNonPaged(sizeof(NVDM_PAYLOAD_COT));
1125     NV_CHECK_OR_RETURN(LEVEL_ERROR, pCotPayload != NULL, NV_ERR_NO_MEMORY);
1126     portMemSet(pCotPayload, 0, sizeof(NVDM_PAYLOAD_COT));
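
    //
    // The FRTS WPR size scratch field is encoded in units of 4K, so shift left
    // by 12 to convert it to a size in bytes (1MB here).
    //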
1127 
1128     frtsSize = NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE_1MB_IN_4K << 12;
1129     NV_ASSERT(frtsSize != 0);
1130 
1131     pCotPayload->version = 1;
1132     pCotPayload->size = sizeof(NVDM_PAYLOAD_COT);
1133 
1134     // Set up sysmem for FRTS copy
1135     if (!pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_FRTS_SYSMEM))
1136     {
1137         NvU64 flags = MEMDESC_FLAGS_NONE;
1138 
1139         //
        // On systems with SEV enabled, the FRTS has to be accessible to
        // FSP (a unit inside the GPU) and hence placed in unprotected sysmem.
1142         //
1143         status = memdescCreate(&pKernelFsp->pSysmemFrtsMemdesc, pGpu, frtsSize,
1144                                0, NV_TRUE, ADDR_SYSMEM, NV_MEMORY_UNCACHED, flags);
1145         NV_ASSERT_OR_GOTO(status == NV_OK, failed);
1146 
1147         status = memdescAlloc(pKernelFsp->pSysmemFrtsMemdesc);
1148         NV_ASSERT_OR_GOTO(status == NV_OK, failed);
1149 
1150         // Set up a kernel mapping for future use in RM
1151         status = memdescMap(pKernelFsp->pSysmemFrtsMemdesc, 0, frtsSize, NV_TRUE,
1152                             NV_PROTECT_READ_WRITE, &pVaKernel, &pPrivKernel);
1153         NV_ASSERT_OR_GOTO(status == NV_OK, failed);
1154 
1155         portMemSet(pVaKernel, 0, frtsSize);
1156 
1157         memdescSetKernelMapping(pKernelFsp->pSysmemFrtsMemdesc, pVaKernel);
1158         memdescSetKernelMappingPriv(pKernelFsp->pSysmemFrtsMemdesc, pPrivKernel);
1159 
1160         pCotPayload->frtsSysmemOffset = memdescGetPhysAddr(pKernelFsp->pSysmemFrtsMemdesc, AT_GPU, 0);
1161         pCotPayload->frtsSysmemSize = frtsSize;
1162     }
1163 
1164     // Set up vidmem for FRTS copy
1165     if (!pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_FRTS_VIDMEM))
1166     {
1167         //
1168         // Since we are very early in the boot path, we cannot know how much
1169         // vidmem reservation RM will need at the end of FB. For now use an
        // estimated value to leave enough space for buffers such as the VGA
        // workspace, BAR instance blocks, and BAR page directories, which will
1172         // be allocated at the end of FB. If more reservations are added in the
1173         // future, this code will need to be updated.
1174         // Bug 200711957 has more info and tracks longer term improvements.
1175         //
1176         const NvU32 ESTIMATED_RESERVE_FB = 0x200000;
1177 
1178         // Offset from end of FB to be used by FSP
1179         pCotPayload->frtsVidmemOffset = ESTIMATED_RESERVE_FB;
1180         pCotPayload->frtsVidmemSize = frtsSize;
1181     }
1182 
1183     pCotPayload->gspFmcSysmemOffset = (NvU64)-1;
1184     pCotPayload->gspBootArgsSysmemOffset = (NvU64)-1;
1185 
1186     // Set up GSP-FMC for FSP to boot GSP
1187     if (!pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_GSPFMC))
1188     {
1189         status = kfspSetupGspImages(pGpu, pKernelFsp, pCotPayload);
        if (status != NV_OK)
1191         {
1192             NV_PRINTF(LEVEL_ERROR, "Ucode image preparation failed!\n");
1193             goto failed;
1194         }
1196     }
1197 
1198     status = kfspSendAndReadMessage(pGpu, pKernelFsp, (NvU8 *)pCotPayload,
1199                             sizeof(NVDM_PAYLOAD_COT), NVDM_TYPE_COT, NULL, 0);
1200     if (status != NV_OK)
1201     {
1202         NV_PRINTF(LEVEL_ERROR, "Sent following content to FSP: \n");
1203         NV_PRINTF(LEVEL_ERROR, "version=0x%x, size=0x%x, gspFmcSysmemOffset=0x%llx\n",
1204             pCotPayload->version, pCotPayload->size, pCotPayload->gspFmcSysmemOffset);
1205         NV_PRINTF(LEVEL_ERROR, "frtsSysmemOffset=0x%llx, frtsSysmemSize=0x%x\n",
1206             pCotPayload->frtsSysmemOffset, pCotPayload->frtsSysmemSize);
1207         NV_PRINTF(LEVEL_ERROR, "frtsVidmemOffset=0x%llx, frtsVidmemSize=0x%x\n",
1208             pCotPayload->frtsVidmemOffset, pCotPayload->frtsVidmemSize);
1209         NV_PRINTF(LEVEL_ERROR, "gspBootArgsSysmemOffset=0x%llx\n",
1210             pCotPayload->gspBootArgsSysmemOffset);
1211         goto failed;
1212     }
1213 
1214     //
1215     // Need to check if GSP has been booted here so that we can skip booting
1216     // GSP again later in ACR code path. On GSP-RM, the calling code path (kgsp)
1217     // will check for GSP boot status.
1218     //
1219     if (!pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_GSPFMC) &&
1220         !pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_GSP_MODE_GSPRM))
1221     {
1222         status = _kfspCheckGspBootStatus(pGpu, pKernelFsp);
1223         NV_ASSERT_OR_GOTO(status == NV_OK, failed);
1224     }
1225 
1226     // Set property to indicate we only support secure boot at this point
1227     pKernelFsp->setProperty(pKernelFsp, PDB_PROP_KFSP_BOOT_COMMAND_OK, NV_TRUE);
1228     pKernelFsp->pCotPayload = pCotPayload;
1229     return NV_OK;
1230 
1231 failed:
1232     NV_PRINTF(LEVEL_ERROR, "FSP boot cmds failed. RM cannot boot.\n");
1233     kfspDumpDebugState_HAL(pGpu, pKernelFsp);
1234 
1235     memdescDestroy(pKernelFsp->pSysmemFrtsMemdesc);
1236     pKernelFsp->pSysmemFrtsMemdesc = NULL;
1237 
1238     portMemFree(pCotPayload);
1239 
1240     return status;
1241 }
1242 
1243 NV_STATUS
1244 kfspErrorCode2NvStatusMap_GH100
1245 (
1246     OBJGPU    *pGpu,
1247     KernelFsp *pKernelFsp,
1248     NvU32      errorCode
1249 )
1250 {
1251     switch (errorCode)
1252     {
1253         case FSP_OK:
1254         return NV_OK;
1255 
1256         case FSP_ERR_IFR_FILE_NOT_FOUND:
1257         return NV_ERR_OBJECT_NOT_FOUND;
1258 
1259         case FSP_ERR_IFS_ERR_INVALID_STATE:
1260         case FSP_ERR_IFS_ERR_INVALID_DATA:
1261         return NV_ERR_INVALID_DATA;
1262 
1263         default:
1264         return NV_ERR_GENERIC;
1265     }
1266 }
1267 
1268 /*!
1269  * Size of extra memory required to be reserved after FRTS region
1270  */
1271 NvU64
1272 kfspGetExtraReservedMemorySize_GH100
1273 (
1274     OBJGPU    *pGpu,
1275     KernelFsp *pKernelFsp
1276 )
1277 {
1278     // Bug: 3763996
1279     return 4 * 1024;
1280 }
1281