1 /*
2 * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 * SPDX-License-Identifier: MIT
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "nvUnixVersion.h"
25
26 #include "nvidia-modeset-os-interface.h"
27
28 #include "nvkms-api.h"
29 #include "nvkms-rmapi.h"
30 #include "nvkms-vrr.h"
31
32 #include "nvkms-kapi.h"
33 #include "nvkms-kapi-private.h"
34 #include "nvkms-kapi-internal.h"
35 #include "nvkms-kapi-notifiers.h"
36
37 #include <class/cl0000.h> /* NV01_ROOT/NV01_NULL_OBJECT */
38 #include <class/cl003e.h> /* NV01_MEMORY_SYSTEM */
39 #include <class/cl0080.h> /* NV01_DEVICE */
40 #include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
41 #include <class/cl0071.h> /* NV01_MEMORY_SYSTEM_OS_DESCRIPTOR */
42 #include <class/cl2080.h> /* NV20_SUBDEVICE_0 */
43
44 #include <ctrl/ctrl0000/ctrl0000gpu.h> /* NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 */
45 #include <ctrl/ctrl0000/ctrl0000unix.h> /* NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD */
46 #include <ctrl/ctrl0000/ctrl0000client.h> /* NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM */
47 #include <ctrl/ctrl0080/ctrl0080gpu.h> /* NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES */
48 #include <ctrl/ctrl0080/ctrl0080fb.h> /* NV0080_CTRL_CMD_FB_GET_CAPS_V2 */
49 #include <ctrl/ctrl2080/ctrl2080fb.h> /* NV2080_CTRL_CMD_FB_GET_SEMAPHORE_SURFACE_LAYOUT */
50 #include <ctrl/ctrl2080/ctrl2080unix.h> /* NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT */
51
52 #include "ctrl/ctrl003e.h" /* NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES */
53 #include "ctrl/ctrl0041.h" /* NV0041_CTRL_SURFACE_INFO */
54
55
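/*
 * The KAPI layer indices below are used interchangeably with NVKMS layer
 * indices; verify at compile time that the two enumerations agree.
 */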
56 ct_assert(NVKMS_KAPI_LAYER_PRIMARY_IDX == NVKMS_MAIN_LAYER);
57 ct_assert(NVKMS_KAPI_LAYER_MAX == NVKMS_MAX_LAYERS_PER_HEAD);
58
59 /* XXX Move to NVKMS */
60 #define NV_EVO_PITCH_ALIGNMENT 0x100
61
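/* NVKMS event types that KAPI forwards to the client's event callback. */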
62 #define NVKMS_KAPI_SUPPORTED_EVENTS_MASK \
63 ((1 << NVKMS_EVENT_TYPE_DPY_CHANGED) | \
64 (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) | \
65 (1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED))
66
67 static NvU32 EnumerateGpus(nv_gpu_info_t *gpuInfo)
68 {
69 return nvkms_enumerate_gpus(gpuInfo);
70 }
71
72 /*
73 * Helper function to free RM objects allocated for NvKmsKapiDevice.
74 */
75 static void RmFreeDevice(struct NvKmsKapiDevice *device)
76 {
77 if (device->hRmSubDevice != 0x0) {
78 nvRmApiFree(device->hRmClient,
79 device->hRmDevice,
80 device->hRmSubDevice);
81 nvKmsKapiFreeRmHandle(device, device->hRmSubDevice);
82 device->hRmSubDevice = 0x0;
83 }
84
85 /* Free RM device object */
86
87 if (device->hRmDevice != 0x0) {
88 nvRmApiFree(device->hRmClient,
89 device->hRmClient,
90 device->hRmDevice);
91 nvKmsKapiFreeRmHandle(device, device->hRmDevice);
92
93 device->hRmDevice = 0x0;
94 }
95
96 nvTearDownUnixRmHandleAllocator(&device->handleAllocator);
97
98 device->deviceInstance = 0;
99
100 /* Free RM client */
101
102 if (device->hRmClient != 0x0) {
103 nvRmApiFree(device->hRmClient,
104 device->hRmClient,
105 device->hRmClient);
106
107 device->hRmClient = 0x0;
108 }
109 }
110
111 /*
112 * Helper function to allocate RM objects for NvKmsKapiDevice.
113 */
114 static NvBool RmAllocateDevice(struct NvKmsKapiDevice *device)
115 {
116 NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS getNumSubDevicesParams = { 0 };
117 NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS idInfoParams = { };
118 NV2080_ALLOC_PARAMETERS subdevAllocParams = { 0 };
119 NV0080_ALLOC_PARAMETERS allocParams = { };
120 NV0080_CTRL_FB_GET_CAPS_V2_PARAMS fbCapsParams = { 0 };
121
122 NvU32 hRmDevice, hRmSubDevice;
123 NvBool supportsGenericPageKind;
124 NvU32 ret;
125
126 /* Allocate RM client */
127
128 ret = nvRmApiAlloc(NV01_NULL_OBJECT,
129 NV01_NULL_OBJECT,
130 NV01_NULL_OBJECT,
131 NV01_ROOT,
132 &device->hRmClient);
133
134 if (ret != NVOS_STATUS_SUCCESS || device->hRmClient == 0x0) {
135 nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM client");
136 goto failed;
137 }
138
139 /* Query device instance */
140
141 idInfoParams.gpuId = device->gpuId;
142
143 ret = nvRmApiControl(device->hRmClient,
144 device->hRmClient,
145 NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2,
146 &idInfoParams,
147 sizeof(idInfoParams));
148
149 if (ret != NVOS_STATUS_SUCCESS) {
150 nvKmsKapiLogDeviceDebug(device, "Failed to query device instance");
151 goto failed;
152 }
153
154 device->deviceInstance = idInfoParams.deviceInstance;
155 device->isSOC =
156 FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE,
157 idInfoParams.gpuFlags);
158
159 /* Initialize RM handle allocator */
160
161 if (!nvInitUnixRmHandleAllocator(&device->handleAllocator,
162 device->hRmClient,
163 device->deviceInstance + 1)) {
164 nvKmsKapiLogDeviceDebug(device, "Failed to initialize RM handle allocator");
165 goto failed;
166 }
167
168 /* Allocate RM device object */
169
170 hRmDevice = nvKmsKapiGenerateRmHandle(device);
171
172 if (hRmDevice == 0x0) {
173 nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM handle");
174 goto failed;
175 }
176
177 allocParams.deviceId = device->deviceInstance;
178
179 allocParams.hClientShare = device->hRmClient;
180
181 ret = nvRmApiAlloc(device->hRmClient,
182 device->hRmClient,
183 hRmDevice,
184 NV01_DEVICE_0,
185 &allocParams);
186
187 if (ret != NVOS_STATUS_SUCCESS) {
188 nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM device object");
189 nvKmsKapiFreeRmHandle(device, hRmDevice);
190 goto failed;
191 }
192
193 device->hRmDevice = hRmDevice;
194
195 ret = nvRmApiControl(device->hRmClient,
196 device->hRmDevice,
197 NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES,
198 &getNumSubDevicesParams,
199 sizeof(getNumSubDevicesParams));
200
201 if (ret != NVOS_STATUS_SUCCESS) {
202 nvKmsKapiLogDeviceDebug(device, "Failed to determine number of GPUs");
203 goto failed;
204 }
205
206 if (getNumSubDevicesParams.numSubDevices != 1) {
207 nvKmsKapiLogDeviceDebug(
208 device,
209 "Unsupported number of GPUs: %d",
210 getNumSubDevicesParams.numSubDevices);
211 goto failed;
212 }
213
214 hRmSubDevice = nvKmsKapiGenerateRmHandle(device);
215
216 if (hRmSubDevice == 0x0) {
217 nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM handle");
218 goto failed;
219 }
220
221 subdevAllocParams.subDeviceId = 0;
222
223 ret = nvRmApiAlloc(device->hRmClient,
224 device->hRmDevice,
225 hRmSubDevice,
226 NV20_SUBDEVICE_0,
227 &subdevAllocParams);
228
229 if (ret != NVOS_STATUS_SUCCESS) {
230 nvKmsKapiLogDeviceDebug(device, "Failed to initialize subDevice");
231 nvKmsKapiFreeRmHandle(device, hRmSubDevice);
232 goto failed;
233 }
234
235 device->hRmSubDevice = hRmSubDevice;
236
237 if (device->isSOC) {
238 /* NVKMS is only used on T23X and later chips,
239 * which all support generic memory. */
240 supportsGenericPageKind = NV_TRUE;
241 } else {
242 ret = nvRmApiControl(device->hRmClient,
243 device->hRmDevice,
244 NV0080_CTRL_CMD_FB_GET_CAPS_V2,
245 &fbCapsParams,
246 sizeof (fbCapsParams));
247 if (ret != NVOS_STATUS_SUCCESS) {
248 nvKmsKapiLogDeviceDebug(device, "Failed to query framebuffer capabilities");
249 goto failed;
250 }
251 supportsGenericPageKind =
252 NV0080_CTRL_FB_GET_CAP(fbCapsParams.capsTbl,
253 NV0080_CTRL_FB_CAPS_GENERIC_PAGE_KIND);
254 }
255
256 device->caps.genericPageKind =
257 supportsGenericPageKind ?
258 0x06 /* NV_MMU_PTE_KIND_GENERIC_MEMORY */ :
259 0xfe /* NV_MMU_PTE_KIND_GENERIC_16BX2 */;
260
261 return NV_TRUE;
262
263 failed:
264
265 RmFreeDevice(device);
266
267 return NV_FALSE;
268 }
269
270 /*
271 * Helper function to free NVKMS objects allocated for NvKmsKapiDevice.
272 */
273 static void KmsFreeDevice(struct NvKmsKapiDevice *device)
274 {
275 /* Free notifier memory */
276
277 nvKmsKapiFreeNotifiers(device);
278
279 /* Free NVKMS device */
280
281 if (device->hKmsDevice != 0x0) {
282 struct NvKmsFreeDeviceParams paramsFree = { };
283
284 paramsFree.request.deviceHandle = device->hKmsDevice;
285
286 nvkms_ioctl_from_kapi(device->pKmsOpen,
287 NVKMS_IOCTL_FREE_DEVICE,
288 &paramsFree, sizeof(paramsFree));
289
290 device->hKmsDevice = device->hKmsDisp = 0x0;
291 }
292
293 /* Close NVKMS */
294
295 if (device->pKmsOpen != NULL) {
296 nvkms_close_from_kapi(device->pKmsOpen);
297 device->pKmsOpen = NULL;
298 }
299 }
300
301 /*
302 * Helper function to allocate NVKMS objects for NvKmsKapiDevice.
303 */
304 static NvBool KmsAllocateDevice(struct NvKmsKapiDevice *device)
305 {
306 struct NvKmsAllocDeviceParams *paramsAlloc;
307 NvBool status;
308 NvBool inVideoMemory = FALSE;
309 NvU32 head;
310 NvBool ret = FALSE;
311 NvU32 layer;
312
313 paramsAlloc = nvKmsKapiCalloc(1, sizeof(*paramsAlloc));
314 if (paramsAlloc == NULL) {
315 return FALSE;
316 }
317
318 /* Open NVKMS */
319
320 device->pKmsOpen = nvkms_open_from_kapi(device);
321
322 if (device->pKmsOpen == NULL) {
323 nvKmsKapiLogDeviceDebug(device, "Failed to Open NVKMS");
324 goto done;
325 }
326
327 /* Allocate NVKMS device */
328
329 nvkms_strncpy(
330 paramsAlloc->request.versionString,
331 NV_VERSION_STRING,
332 sizeof(paramsAlloc->request.versionString));
333
334 if (device->isSOC) {
335 paramsAlloc->request.deviceId = NVKMS_DEVICE_ID_TEGRA;
336 } else {
337 paramsAlloc->request.deviceId = device->deviceInstance;
338 }
339 paramsAlloc->request.sliMosaic = NV_FALSE;
340 paramsAlloc->request.enableConsoleHotplugHandling = NV_TRUE;
341
342 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
343 NVKMS_IOCTL_ALLOC_DEVICE,
344 paramsAlloc, sizeof(*paramsAlloc));
345
346 if (!status ||
347 paramsAlloc->reply.status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) {
348
349 if (paramsAlloc->reply.status ==
350 NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE) {
351 nvKmsKapiLogDeviceDebug(
352 device,
353 "Display hardware is not available; falling back to "
354 "displayless mode");
355
356 ret = TRUE;
357 goto done;
358 }
359
360 nvKmsKapiLogDeviceDebug(
361 device,
362 "Failed to NVKM device %u(%u): %d %d\n",
363 device->gpuId,
364 paramsAlloc->request.deviceId,
365 status,
366 paramsAlloc->reply.status);
367
368 goto done;
369 }
370
371 device->hKmsDevice = paramsAlloc->reply.deviceHandle;
372
373 device->caps.cursorCompositionCaps =
374 paramsAlloc->reply.cursorCompositionCaps;
375
376 device->caps.overlayCompositionCaps =
377 paramsAlloc->reply.layerCaps[NVKMS_OVERLAY_LAYER].composition;
378
379 device->caps.validLayerRRTransforms =
380 paramsAlloc->reply.validLayerRRTransforms;
381
382 device->caps.maxWidthInPixels = paramsAlloc->reply.maxWidthInPixels;
383 device->caps.maxHeightInPixels = paramsAlloc->reply.maxHeightInPixels;
384 device->caps.maxCursorSizeInPixels = paramsAlloc->reply.maxCursorSize;
385 device->caps.requiresVrrSemaphores = paramsAlloc->reply.requiresVrrSemaphores;
386
387 /* XXX Add LUT support */
388
389 device->numHeads = paramsAlloc->reply.numHeads;
390
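/*
 * Every head must expose at least two layers (the primary layer plus at
 * least one overlay); otherwise device allocation fails.
 */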
391 for (head = 0; head < device->numHeads; head++) {
392 if (paramsAlloc->reply.numLayers[head] < 2) {
393 goto done;
394 }
395 device->numLayers[head] = paramsAlloc->reply.numLayers[head];
396 }
397
398 for (layer = 0; layer < NVKMS_KAPI_LAYER_MAX; layer++) {
399 device->supportedSurfaceMemoryFormats[layer] =
400 paramsAlloc->reply.layerCaps[layer].supportedSurfaceMemoryFormats;
401 device->supportsHDR[layer] = paramsAlloc->reply.layerCaps[layer].supportsHDR;
402 }
403
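/*
 * Choose the notifier (NISO) surface format: prefer the four-word nvdisplay
 * layout, then the four-word layout, and fall back to the legacy format,
 * based on the formats the device reports as valid.
 */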
404 if (paramsAlloc->reply.validNIsoFormatMask &
405 (1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) {
406 device->notifier.format = NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY;
407 } else if (paramsAlloc->reply.validNIsoFormatMask &
408 (1 << NVKMS_NISO_FORMAT_FOUR_WORD)) {
409 device->notifier.format = NVKMS_NISO_FORMAT_FOUR_WORD;
410 } else {
411 nvAssert(paramsAlloc->reply.validNIsoFormatMask &
412 (1 << NVKMS_NISO_FORMAT_LEGACY));
413 device->notifier.format = NVKMS_NISO_FORMAT_LEGACY;
414 }
415
416 /* XXX Add support for SLI/multiple display engines per device */
417 if (paramsAlloc->reply.numDisps != 1)
418 {
419 nvKmsKapiLogDeviceDebug(device, "Found unsupported SLI configuration");
420 goto done;
421 }
422
423 device->hKmsDisp = paramsAlloc->reply.dispHandles[0];
424 device->dispIdx = 0;
425
426 device->subDeviceMask = paramsAlloc->reply.subDeviceMask;
427
428 device->isoIOCoherencyModes = paramsAlloc->reply.isoIOCoherencyModes;
429 device->nisoIOCoherencyModes = paramsAlloc->reply.nisoIOCoherencyModes;
430
431 device->supportsSyncpts = paramsAlloc->reply.supportsSyncpts;
432
433 if (paramsAlloc->reply.nIsoSurfacesInVidmemOnly) {
434 inVideoMemory = TRUE;
435 }
436
437 /* Allocate notifier memory */
438 if (!nvKmsKapiAllocateNotifiers(device, inVideoMemory)) {
439 nvKmsKapiLogDebug(
440 "Failed to allocate Notifier objects for GPU ID 0x%08x",
441 device->gpuId);
442 goto done;
443 }
444
445 ret = NV_TRUE;
446
447 done:
448 if (!ret) {
449 KmsFreeDevice(device);
450 }
451
452 nvKmsKapiFree(paramsAlloc);
453
454 return ret;
455 }
456
457 static void FreeDevice(struct NvKmsKapiDevice *device)
458 {
459 /* Free NVKMS objects allocated for NvKmsKapiDevice */
460
461 KmsFreeDevice(device);
462
463 /* Free RM objects allocated for NvKmsKapiDevice */
464
465 RmFreeDevice(device);
466
467 /* Lower the reference count of gpu. */
468
469 nvkms_close_gpu(device->gpuId);
470
471 if (device->pSema != NULL) {
472 nvkms_sema_free(device->pSema);
473 }
474
475 nvKmsKapiFree(device);
476 }
477
478 NvBool nvKmsKapiAllocateSystemMemory(struct NvKmsKapiDevice *device,
479 NvU32 hRmHandle,
480 enum NvKmsSurfaceMemoryLayout layout,
481 NvU64 size,
482 enum NvKmsKapiAllocationType type,
483 NvU8 *compressible)
484 {
485 NvU32 ret;
486 NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
487 const NvKmsDispIOCoherencyModes *pIOCoherencyModes = NULL;
488
489 memAllocParams.owner = NVKMS_RM_HEAP_ID;
490 memAllocParams.size = size;
491
492 switch (layout) {
493 case NvKmsSurfaceMemoryLayoutBlockLinear:
494 memAllocParams.attr =
495 FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR,
496 memAllocParams.attr);
497 if (*compressible) {
498 /*
499 * RM will choose a compressed page kind and hence allocate
500 * comptags for color surfaces >= 32bpp. The actual kind
501 * chosen isn't important, as it can be overridden by creating
502 * a virtual alloc with a different kind when mapping the
503 * memory into the GPU.
504 */
505 memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32,
506 memAllocParams.attr);
507 memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COMPR, _ANY,
508 memAllocParams.attr);
509 } else {
510 memAllocParams.attr =
511 FLD_SET_DRF(OS32, _ATTR, _DEPTH, _UNKNOWN,
512 memAllocParams.attr);
513 }
514 break;
515
516 case NvKmsSurfaceMemoryLayoutPitch:
517 memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH,
518 memAllocParams.attr);
519 break;
520
521 default:
522 nvKmsKapiLogDeviceDebug(device, "Unknown Memory Layout");
523 return NV_FALSE;
524 }
525
526 switch (type) {
527 case NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT:
528 /* XXX Note compression and scanout do not work together on
529 * any current GPUs. However, some use cases do involve scanning
530 * out a compression-capable surface:
531 *
532 * 1) Mapping the compressible surface as non-compressed when
533 * generating its content.
534 *
535 * 2) Using decompress-in-place to decompress the surface content
536 * before scanning it out.
537 *
538 * Hence creating compressed allocations of TYPE_SCANOUT is allowed.
539 */
540
541 pIOCoherencyModes = &device->isoIOCoherencyModes;
542
543 break;
544 case NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER:
545 if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
546 nvKmsKapiLogDeviceDebug(device,
547 "Attempting creation of BlockLinear notifier memory");
548 return NV_FALSE;
549 }
550
551 memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _NISO_DISPLAY,
552 _YES, memAllocParams.attr2);
553
554 pIOCoherencyModes = &device->nisoIOCoherencyModes;
555
556 break;
557 case NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN:
558 memAllocParams.flags |= NVOS32_ALLOC_FLAGS_NO_SCANOUT;
559 break;
560 default:
561 nvKmsKapiLogDeviceDebug(device, "Unknown Allocation Type");
562 return NV_FALSE;
563 }
564
565 memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI,
566 memAllocParams.attr);
567 memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO,
568 memAllocParams.attr2);
569
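/*
 * Use a write-combined allocation unless the relevant display I/O path is
 * coherent with the CPU caches, in which case write-back is used.
 */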
570 if (pIOCoherencyModes == NULL || !pIOCoherencyModes->coherent) {
571 memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY,
572 _WRITE_COMBINE, memAllocParams.attr);
573 } else {
574 memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY,
575 _WRITE_BACK, memAllocParams.attr);
576 }
577
578 memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS,
579 memAllocParams.attr);
580
581 ret = nvRmApiAlloc(device->hRmClient,
582 device->hRmDevice,
583 hRmHandle,
584 NV01_MEMORY_SYSTEM,
585 &memAllocParams);
586 if (ret != NVOS_STATUS_SUCCESS) {
587 nvKmsKapiLogDeviceDebug(
588 device,
589 "nvRmApiAlloc failed with error code 0x%08x",
590 ret);
591
592 return NV_FALSE;
593 }
594
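/* Report back whether RM actually granted a compressible allocation. */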
595 if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE,
596 memAllocParams.attr)) {
597 *compressible = 0;
598 } else {
599 *compressible = 1;
600 }
601
602 return TRUE;
603 }
604
605 NvBool nvKmsKapiAllocateVideoMemory(struct NvKmsKapiDevice *device,
606 NvU32 hRmHandle,
607 enum NvKmsSurfaceMemoryLayout layout,
608 NvU64 size,
609 enum NvKmsKapiAllocationType type,
610 NvU8 *compressible)
611 {
612 NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
613 NvU32 ret;
614
615 memAllocParams.owner = NVKMS_RM_HEAP_ID;
616 memAllocParams.size = size;
617
618 switch (layout) {
619 case NvKmsSurfaceMemoryLayoutBlockLinear:
620 memAllocParams.attr =
621 FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR,
622 memAllocParams.attr);
623
624 if (*compressible) {
625 /*
626 * RM will choose a compressed page kind and hence allocate
627 * comptags for color surfaces >= 32bpp. The actual kind
628 * chosen isn't important, as it can be overridden by creating
629 * a virtual alloc with a different kind when mapping the
630 * memory into the GPU.
631 */
632 memAllocParams.attr =
633 FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32,
634 memAllocParams.attr);
635 memAllocParams.attr =
636 FLD_SET_DRF(OS32, _ATTR, _COMPR, _ANY,
637 memAllocParams.attr);
638 } else {
639 memAllocParams.attr =
640 FLD_SET_DRF(OS32, _ATTR, _DEPTH, _UNKNOWN,
641 memAllocParams.attr);
642 }
643 break;
644
645 case NvKmsSurfaceMemoryLayoutPitch:
646 memAllocParams.attr =
647 FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH,
648 memAllocParams.attr);
649 break;
650
651 default:
652 nvKmsKapiLogDeviceDebug(device, "Unknown Memory Layout");
653 return NV_FALSE;
654 }
655
656
657 memAllocParams.attr =
658 FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM,
659 memAllocParams.attr);
660 memAllocParams.attr2 =
661 FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO,
662 memAllocParams.attr2);
663
664 switch (type) {
665 case NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT:
666 /* XXX [JRJ] Not quite right. This can also be used to allocate
667 * cursor images. The stuff RM does with this field is kind of
668 * black magic, and I can't tell if it actually matters.
669 */
670 memAllocParams.type = NVOS32_TYPE_PRIMARY;
671
672 memAllocParams.alignment = NV_EVO_SURFACE_ALIGNMENT;
673 memAllocParams.flags |=
674 NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE | /* Pick up above EVO alignment */
675 NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP; /* X sets this for cursors */
676 memAllocParams.attr =
677 FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS,
678 memAllocParams.attr);
679
680 /* XXX [JRJ] Note compression and scanout do not work together on
681 * any current GPUs. However, some use cases do involve scanning
682 * out a compression-capable surface:
683 *
684 * 1) Mapping the compressible surface as non-compressed when
685 * generating its content.
686 *
687 * 2) Using decompress-in-place to decompress the surface content
688 * before scanning it out.
689 *
690 * Hence creating compressed allocations of TYPE_SCANOUT is allowed.
691 */
692
693 break;
694 case NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER:
695 if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
696 nvKmsKapiLogDeviceDebug(device,
697 "Attempting creation of BlockLinear notifier memory");
698 return NV_FALSE;
699 }
700
701 memAllocParams.type = NVOS32_TYPE_DMA;
702
703 memAllocParams.attr =
704 FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB,
705 memAllocParams.attr);
706 memAllocParams.attr =
707 FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED,
708 memAllocParams.attr);
709
710 break;
711 case NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN:
712 memAllocParams.type = NVOS32_TYPE_IMAGE;
713 memAllocParams.flags |=
714 NVOS32_ALLOC_FLAGS_NO_SCANOUT |
715 NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP;
716 memAllocParams.attr =
717 FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS,
718 memAllocParams.attr);
719 break;
720 default:
721 nvKmsKapiLogDeviceDebug(device, "Unknown Allocation Type");
722 return NV_FALSE;
723 }
724
725 ret = nvRmApiAlloc(device->hRmClient,
726 device->hRmDevice,
727 hRmHandle,
728 NV01_MEMORY_LOCAL_USER,
729 &memAllocParams);
730
731 if (ret != NVOS_STATUS_SUCCESS) {
732 nvKmsKapiLogDeviceDebug(
733 device,
734 "VidHeapControl failed with error code 0x%08x",
735 ret);
736
737 return NV_FALSE;
738 }
739
740 if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE,
741 memAllocParams.attr)) {
742 *compressible = 0;
743 } else {
744 *compressible = 1;
745 }
746
747 return NV_TRUE;
748 }
749
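/*
 * Allocate and initialize an NvKmsKapiDevice: take a reference on the GPU,
 * then allocate the RM objects followed by the NVKMS objects.  FreeDevice()
 * tears all of this down in the reverse order.
 */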
750 static struct NvKmsKapiDevice* AllocateDevice
751 (
752 const struct NvKmsKapiAllocateDeviceParams *params
753 )
754 {
755 struct NvKmsKapiDevice *device = NULL;
756
757 device = nvKmsKapiCalloc(1, sizeof(*device));
758
759 if (device == NULL) {
760 nvKmsKapiLogDebug(
761 "Failed to allocate memory for NvKmsKapiDevice of GPU ID 0x%08x",
762 params->gpuId);
763 goto failed;
764 }
765
766 device->pSema = nvkms_sema_alloc();
767
768 if (device->pSema == NULL) {
769 nvKmsKapiLogDebug(
770 "Failed to allocate semaphore for NvKmsKapiDevice of GPU ID 0x%08x",
771 params->gpuId);
772 goto failed;
773 }
774
775 /* Raise the reference count of gpu. */
776
777 if (!nvkms_open_gpu(params->gpuId)) {
778 nvKmsKapiLogDebug("Failed to open GPU ID 0x%08x", params->gpuId);
779 goto failed;
780 }
781
782 device->gpuId = params->gpuId;
783
784 nvKmsKapiLogDebug(
785 "Allocating NvKmsKapiDevice 0x%p for GPU ID 0x%08x",
786 device,
787 device->gpuId);
788
789 /* Allocate RM object for NvKmsKapiDevice */
790
791 if (!RmAllocateDevice(device)) {
792 nvKmsKapiLogDebug(
793 "Failed to allocate RM objects for GPU ID 0x%08x",
794 device->gpuId);
795 goto failed;
796 }
797
798 /* Allocate NVKMS objects for NvKmsKapiDevice */
799
800 if (!KmsAllocateDevice(device)) {
801 nvKmsKapiLogDebug(
802 "Failed to allocate NVKMS objects for GPU ID 0x%08x",
803 device->gpuId);
804 goto failed;
805 }
806
807 device->privateData = params->privateData;
808 device->eventCallback = params->eventCallback;
809
810 return device;
811
812 failed:
813
814 FreeDevice(device);
815
816 return NULL;
817 }
818
819 static NvBool GrabOwnership(struct NvKmsKapiDevice *device)
820 {
821 struct NvKmsGrabOwnershipParams paramsGrab = { };
822
823 if (device->hKmsDevice == 0x0) {
824 return NV_TRUE;
825 }
826
827 paramsGrab.request.deviceHandle = device->hKmsDevice;
828
829 return nvkms_ioctl_from_kapi(device->pKmsOpen,
830 NVKMS_IOCTL_GRAB_OWNERSHIP,
831 &paramsGrab, sizeof(paramsGrab));
832
833 }
834
835 static void ReleaseOwnership(struct NvKmsKapiDevice *device)
836 {
837 struct NvKmsReleaseOwnershipParams paramsRelease = { };
838
839 if (device->hKmsDevice == 0x0) {
840 return;
841 }
842
843 paramsRelease.request.deviceHandle = device->hKmsDevice;
844
845 nvkms_ioctl_from_kapi(device->pKmsOpen,
846 NVKMS_IOCTL_RELEASE_OWNERSHIP,
847 &paramsRelease, sizeof(paramsRelease));
848 }
849
850 static NvBool GrantPermissions
851 (
852 NvS32 fd,
853 struct NvKmsKapiDevice *device,
854 NvU32 head,
855 NvKmsKapiDisplay display
856 )
857 {
858 struct NvKmsGrantPermissionsParams paramsGrant = { };
859 struct NvKmsPermissions *perm = &paramsGrant.request.permissions;
860 NvU32 dispIdx = device->dispIdx;
861
862 if (dispIdx >= ARRAY_LEN(perm->modeset.disp) ||
863 head >= ARRAY_LEN(perm->modeset.disp[0].head) || device == NULL) {
864 return NV_FALSE;
865 }
866
867 if (device->hKmsDevice == 0x0) {
868 return NV_TRUE;
869 }
870
871 perm->type = NV_KMS_PERMISSIONS_TYPE_MODESET;
872 perm->modeset.disp[dispIdx].head[head].dpyIdList =
873 nvAddDpyIdToEmptyDpyIdList(nvNvU32ToDpyId(display));
874
875 paramsGrant.request.fd = fd;
876 paramsGrant.request.deviceHandle = device->hKmsDevice;
877
878 return nvkms_ioctl_from_kapi(device->pKmsOpen,
879 NVKMS_IOCTL_GRANT_PERMISSIONS, &paramsGrant,
880 sizeof(paramsGrant));
881 }
882
883 static NvBool RevokePermissions
884 (
885 struct NvKmsKapiDevice *device,
886 NvU32 head,
887 NvKmsKapiDisplay display
888 )
889 {
890 struct NvKmsRevokePermissionsParams paramsRevoke = { };
891 struct NvKmsPermissions *perm = &paramsRevoke.request.permissions;
892 NvU32 dispIdx = device->dispIdx;
893
894
895 if (dispIdx >= ARRAY_LEN(perm->modeset.disp) ||
896 head >= ARRAY_LEN(perm->modeset.disp[0].head) || device == NULL) {
897 return NV_FALSE;
898 }
899
900 if (device->hKmsDevice == 0x0) {
901 return NV_TRUE;
902 }
903
904 perm->type = NV_KMS_PERMISSIONS_TYPE_MODESET;
905 perm->modeset.disp[dispIdx].head[head].dpyIdList =
906 nvAddDpyIdToEmptyDpyIdList(nvNvU32ToDpyId(display));
907
908 paramsRevoke.request.deviceHandle = device->hKmsDevice;
909
910 return nvkms_ioctl_from_kapi(device->pKmsOpen,
911 NVKMS_IOCTL_REVOKE_PERMISSIONS, &paramsRevoke,
912 sizeof(paramsRevoke));
913 }
914
915 static NvBool GrantSubOwnership
916 (
917 NvS32 fd,
918 struct NvKmsKapiDevice *device
919 )
920 {
921 struct NvKmsGrantPermissionsParams paramsGrant = { };
922 struct NvKmsPermissions *perm = &paramsGrant.request.permissions;
923
924 if (device->hKmsDevice == 0x0) {
925 return NV_TRUE;
926 }
927
928 perm->type = NV_KMS_PERMISSIONS_TYPE_SUB_OWNER;
929
930 paramsGrant.request.fd = fd;
931 paramsGrant.request.deviceHandle = device->hKmsDevice;
932
933 return nvkms_ioctl_from_kapi(device->pKmsOpen,
934 NVKMS_IOCTL_GRANT_PERMISSIONS, &paramsGrant,
935 sizeof(paramsGrant));
936 }
937
938 static NvBool RevokeSubOwnership
939 (
940 struct NvKmsKapiDevice *device
941 )
942 {
943 struct NvKmsRevokePermissionsParams paramsRevoke = { };
944
945 if (device->hKmsDevice == 0x0) {
946 return NV_TRUE;
947 }
948
949 paramsRevoke.request.permissionsTypeBitmask =
950 NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) |
951 NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET) |
952 NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER);
953 paramsRevoke.request.deviceHandle = device->hKmsDevice;
954
955 return nvkms_ioctl_from_kapi(device->pKmsOpen,
956 NVKMS_IOCTL_REVOKE_PERMISSIONS, &paramsRevoke,
957 sizeof(paramsRevoke));
958 }
959
960 static NvBool DeclareEventInterest
961 (
962 const struct NvKmsKapiDevice *device,
963 const NvU32 interestMask
964 )
965 {
966 struct NvKmsDeclareEventInterestParams kmsEventParams = { };
967
968 if (device->hKmsDevice == 0x0 || device->eventCallback == NULL) {
969 return NV_TRUE;
970 }
971
972 kmsEventParams.request.interestMask =
973 interestMask & NVKMS_KAPI_SUPPORTED_EVENTS_MASK;
974
975 return nvkms_ioctl_from_kapi(device->pKmsOpen,
976 NVKMS_IOCTL_DECLARE_EVENT_INTEREST,
977 &kmsEventParams, sizeof(kmsEventParams));
978 }
979
980 static NvBool GetDeviceResourcesInfo
981 (
982 struct NvKmsKapiDevice *device,
983 struct NvKmsKapiDeviceResourcesInfo *info
984 )
985 {
986 struct NvKmsQueryDispParams paramsDisp = { };
987 NV2080_CTRL_FB_GET_SEMAPHORE_SURFACE_LAYOUT_PARAMS semsurfLayoutParams = { };
988 NvBool status = NV_FALSE;
989 NvU32 ret;
990
991 NvU32 i;
992
993 nvkms_memset(info, 0, sizeof(*info));
994
995 ret = nvRmApiControl(device->hRmClient,
996 device->hRmSubDevice,
997 NV2080_CTRL_CMD_FB_GET_SEMAPHORE_SURFACE_LAYOUT,
998 &semsurfLayoutParams,
999 sizeof(semsurfLayoutParams));
1000
1001 if (ret == NVOS_STATUS_SUCCESS) {
1002 info->caps.semsurf.stride = semsurfLayoutParams.size;
1003 info->caps.semsurf.maxSubmittedOffset =
1004 semsurfLayoutParams.maxSubmittedSemaphoreValueOffset;
1005 } else {
1006 /* Non-fatal. No semaphore surface support. */
1007 info->caps.semsurf.stride = 0;
1008 info->caps.semsurf.maxSubmittedOffset = 0;
1009 }
1010
1011 info->caps.hasVideoMemory = !device->isSOC;
1012 info->caps.genericPageKind = device->caps.genericPageKind;
1013
1014 if (device->hKmsDevice == 0x0) {
1015 info->caps.pitchAlignment = 0x1;
1016 return NV_TRUE;
1017 }
1018
1019 paramsDisp.request.deviceHandle = device->hKmsDevice;
1020 paramsDisp.request.dispHandle = device->hKmsDisp;
1021
1022 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
1023 NVKMS_IOCTL_QUERY_DISP,
1024 &paramsDisp, sizeof(paramsDisp));
1025
1026 if (!status)
1027 {
1028 nvKmsKapiLogDeviceDebug(
1029 device,
1030 "Failed to query display engine information");
1031
1032 goto done;
1033 }
1034
1035 info->numHeads = device->numHeads;
1036
1037 ct_assert(sizeof(info->numLayers) == sizeof(device->numLayers));
1038 nvkms_memcpy(info->numLayers, device->numLayers, sizeof(device->numLayers));
1039
1040 ct_assert(ARRAY_LEN(info->connectorHandles) >=
1041 ARRAY_LEN(paramsDisp.reply.connectorHandles));
1042
1043 info->numConnectors = paramsDisp.reply.numConnectors;
1044
1045 for (i = 0; i < paramsDisp.reply.numConnectors; i++) {
1046 info->connectorHandles[i] = paramsDisp.reply.connectorHandles[i];
1047 }
1048
1049 {
1050 const struct NvKmsCompositionCapabilities *pCaps =
1051 &device->caps.cursorCompositionCaps;
1052
1053 info->caps.validCursorCompositionModes =
1054 pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE].
1055 supportedBlendModes[1];
1056 }
1057
1058 for (i = 0; i < NVKMS_KAPI_LAYER_MAX; i++) {
1059 if (i == NVKMS_KAPI_LAYER_PRIMARY_IDX) {
1060 info->caps.layer[i].validCompositionModes =
1061 NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE);
1062 } else {
1063 const struct NvKmsCompositionCapabilities *pCaps =
1064 &device->caps.overlayCompositionCaps;
1065
1066 info->caps.layer[i].validCompositionModes =
1067 pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE].
1068 supportedBlendModes[1];
1069 }
1070 }
1071
1072 for (i = 0; i < NVKMS_KAPI_LAYER_MAX; i++) {
1073 info->caps.layer[i].validRRTransforms =
1074 device->caps.validLayerRRTransforms;
1075 }
1076
1077 info->caps.maxWidthInPixels = device->caps.maxWidthInPixels;
1078 info->caps.maxHeightInPixels = device->caps.maxHeightInPixels;
1079 info->caps.maxCursorSizeInPixels = device->caps.maxCursorSizeInPixels;
1080
1081 info->caps.pitchAlignment = NV_EVO_PITCH_ALIGNMENT;
1082
1083 info->caps.supportsSyncpts = device->supportsSyncpts;
1084
1085 info->caps.supportedCursorSurfaceMemoryFormats =
1086 NVBIT(NvKmsSurfaceMemoryFormatA8R8G8B8);
1087
1088 ct_assert(sizeof(info->supportedSurfaceMemoryFormats) ==
1089 sizeof(device->supportedSurfaceMemoryFormats));
1090
1091 nvkms_memcpy(info->supportedSurfaceMemoryFormats,
1092 device->supportedSurfaceMemoryFormats,
1093 sizeof(device->supportedSurfaceMemoryFormats));
1094
1095 ct_assert(sizeof(info->supportsHDR) ==
1096 sizeof(device->supportsHDR));
1097
1098 nvkms_memcpy(info->supportsHDR,
1099 device->supportsHDR,
1100 sizeof(device->supportsHDR));
1101 done:
1102
1103 return status;
1104 }
1105
1106 /*
1107 * XXX Make it per-connector, query valid dpyId list as dynamic data of
1108 * connector.
1109 */
1110 static NvBool GetDisplays
1111 (
1112 struct NvKmsKapiDevice *device,
1113 NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles
1114 )
1115 {
1116 struct NvKmsQueryDispParams paramsDisp = { };
1117 NvBool status = NV_FALSE;
1118
1119 NVDpyId dpyId;
1120
1121 if (device->hKmsDevice == 0x0) {
1122 *numDisplays = 0;
1123 return NV_TRUE;
1124 }
1125
1126 paramsDisp.request.deviceHandle = device->hKmsDevice;
1127 paramsDisp.request.dispHandle = device->hKmsDisp;
1128
1129 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
1130 NVKMS_IOCTL_QUERY_DISP,
1131 &paramsDisp, sizeof(paramsDisp));
1132
1133 if (!status)
1134 {
1135 nvKmsKapiLogDeviceDebug(
1136 device,
1137 "Failed to query display engine information");
1138
1139 return NV_FALSE;
1140 }
1141
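/*
 * Callers may invoke GetDisplays() twice: first with *numDisplays == 0 to
 * learn how many displays exist, then again with a large enough handle
 * array; in both cases *numDisplays is set to the display count on return.
 */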
1142 if (*numDisplays == 0) {
1143 goto done;
1144 }
1145
1146 if (*numDisplays < nvCountDpyIdsInDpyIdList(paramsDisp.reply.validDpys)) {
1147 nvKmsKapiLogDebug(
1148 "Size of display handle array is less than number of displays");
1149 goto done;
1150 }
1151
1152 FOR_ALL_DPY_IDS(dpyId, paramsDisp.reply.validDpys) {
1153 *(displayHandles++) = nvDpyIdToNvU32(dpyId);
1154 }
1155
1156 done:
1157
1158 *numDisplays = nvCountDpyIdsInDpyIdList(paramsDisp.reply.validDpys);
1159
1160 return NV_TRUE;
1161 }
1162
1163 static NvBool GetConnectorInfo
1164 (
1165 struct NvKmsKapiDevice *device,
1166 NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info
1167 )
1168 {
1169 struct NvKmsQueryConnectorStaticDataParams paramsConnector = { };
1170 NvBool status = NV_FALSE;
1171
1172 if (device == NULL || info == NULL) {
1173 goto done;
1174 }
1175
1176 paramsConnector.request.deviceHandle = device->hKmsDevice;
1177 paramsConnector.request.dispHandle = device->hKmsDisp;
1178 paramsConnector.request.connectorHandle = connector;
1179
1180 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
1181 NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA,
1182 &paramsConnector, sizeof(paramsConnector));
1183
1184 if (!status) {
1185 nvKmsKapiLogDeviceDebug(
1186 device,
1187 "Failed to query static data of connector 0x%08x",
1188 connector);
1189
1190 goto done;
1191 }
1192
1193 info->handle = connector;
1194
1195 info->physicalIndex = paramsConnector.reply.physicalIndex;
1196
1197 info->signalFormat = paramsConnector.reply.signalFormat;
1198
1199 info->type = paramsConnector.reply.type;
1200
1201 done:
1202
1203 return status;
1204 }
1205
1206 static NvBool GetStaticDisplayInfo
1207 (
1208 struct NvKmsKapiDevice *device,
1209 NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info
1210 )
1211 {
1212 struct NvKmsQueryDpyStaticDataParams paramsDpyStatic = { };
1213 NvBool status = NV_FALSE;
1214
1215 if (device == NULL || info == NULL) {
1216 goto done;
1217 }
1218
1219 /* Query static data of display */
1220
1221 paramsDpyStatic.request.deviceHandle = device->hKmsDevice;
1222 paramsDpyStatic.request.dispHandle = device->hKmsDisp;
1223
1224 paramsDpyStatic.request.dpyId = nvNvU32ToDpyId(display);
1225
1226 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
1227 NVKMS_IOCTL_QUERY_DPY_STATIC_DATA,
1228 &paramsDpyStatic, sizeof(paramsDpyStatic));
1229
1230 if (!status) {
1231 nvKmsKapiLogDeviceDebug(
1232 device,
1233 "Failed to query static data of dpy 0x%08x",
1234 display);
1235
1236 goto done;
1237 }
1238
1239 info->handle = display;
1240
1241 info->connectorHandle = paramsDpyStatic.reply.connectorHandle;
1242
1243 ct_assert(sizeof(info->dpAddress) ==
1244 sizeof(paramsDpyStatic.reply.dpAddress));
1245
1246 nvkms_memcpy(info->dpAddress,
1247 paramsDpyStatic.reply.dpAddress,
1248 sizeof(paramsDpyStatic.reply.dpAddress));
1249 info->dpAddress[sizeof(paramsDpyStatic.reply.dpAddress) - 1] = '\0';
1250
1251 info->internal = paramsDpyStatic.reply.mobileInternal;
1252 info->headMask = paramsDpyStatic.reply.headMask;
1253 done:
1254
1255 return status;
1256 }
1257
1258 static NvBool GetDynamicDisplayInfo(
1259 struct NvKmsKapiDevice *device,
1260 struct NvKmsKapiDynamicDisplayParams *params)
1261 {
1262 struct NvKmsQueryDpyDynamicDataParams *pParamsDpyDynamic = NULL;
1263 NvBool status = NV_FALSE;
1264
1265 if (device == NULL || params == NULL) {
1266 goto done;
1267 }
1268
1269 pParamsDpyDynamic = nvKmsKapiCalloc(1, sizeof(*pParamsDpyDynamic));
1270
1271 if (pParamsDpyDynamic == NULL) {
1272 goto done;
1273 }
1274
1275 pParamsDpyDynamic->request.deviceHandle = device->hKmsDevice;
1276 pParamsDpyDynamic->request.dispHandle = device->hKmsDisp;
1277
1278 pParamsDpyDynamic->request.dpyId = nvNvU32ToDpyId(params->handle);
1279
1280 if (params->overrideEdid) {
1281 ct_assert(sizeof(params->edid.buffer) ==
1282 sizeof(pParamsDpyDynamic->reply.edid.buffer));
1283 nvkms_memcpy(
1284 pParamsDpyDynamic->request.edid.buffer,
1285 params->edid.buffer,
1286 sizeof(pParamsDpyDynamic->request.edid.buffer));
1287
1288 pParamsDpyDynamic->request.edid.bufferSize = params->edid.bufferSize;
1289
1290 pParamsDpyDynamic->request.overrideEdid = NV_TRUE;
1291 }
1292
1293 pParamsDpyDynamic->request.forceConnected = params->forceConnected;
1294
1295 pParamsDpyDynamic->request.forceDisconnected = params->forceDisconnected;
1296
1297 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
1298 NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA,
1299 pParamsDpyDynamic, sizeof(*pParamsDpyDynamic));
1300
1301 if (!status) {
1302 nvKmsKapiLogDeviceDebug(
1303 device,
1304 "Failed to query dynamic data of dpy 0x%08x",
1305 params->handle);
1306
1307 goto done;
1308 }
1309
1310 params->connected = pParamsDpyDynamic->reply.connected;
1311
1312 if (pParamsDpyDynamic->reply.connected && !params->overrideEdid) {
1313 NvBool vrrSupported =
1314 (pParamsDpyDynamic->reply.vrrType != NVKMS_DPY_VRR_TYPE_NONE) ? NV_TRUE : NV_FALSE;
1315
1316 nvkms_memcpy(
1317 params->edid.buffer,
1318 pParamsDpyDynamic->reply.edid.buffer,
1319 sizeof(params->edid.buffer));
1320
1321 params->edid.bufferSize = pParamsDpyDynamic->reply.edid.bufferSize;
1322 params->vrrSupported = (vrrSupported && !device->caps.requiresVrrSemaphores) ? NV_TRUE : NV_FALSE;
1323 }
1324
1325 done:
1326
1327 if (pParamsDpyDynamic != NULL) {
1328 nvKmsKapiFree(pParamsDpyDynamic);
1329 }
1330
1331 return status;
1332 }
1333
1334 static void FreeMemory
1335 (
1336 struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory
1337 )
1338 {
1339 if (device == NULL || memory == NULL) {
1340 return;
1341 }
1342
1343 if (memory->hRmHandle != 0x0) {
1344 NvU32 ret;
1345
1346 ret = nvRmApiFree(device->hRmClient,
1347 device->hRmDevice,
1348 memory->hRmHandle);
1349
1350 if (ret != NVOS_STATUS_SUCCESS) {
1351 nvKmsKapiLogDeviceDebug(
1352 device,
1353 "Failed to free RM memory object 0x%08x allocated for "
1354 "NvKmsKapiMemory 0x%p",
1355 memory->hRmHandle, memory);
1356 }
1357
1358 nvKmsKapiFreeRmHandle(device, memory->hRmHandle);
1359 }
1360
1361 nvKmsKapiFree(memory);
1362 }
1363
1364 static struct NvKmsKapiMemory *AllocMemoryObjectAndHandle(
1365 struct NvKmsKapiDevice *device,
1366 NvU32 *handleOut
1367 )
1368 {
1369 struct NvKmsKapiMemory *memory;
1370
1371 /* Allocate the container object */
1372
1373 memory = nvKmsKapiCalloc(1, sizeof(*memory));
1374
1375 if (memory == NULL) {
1376 nvKmsKapiLogDebug(
1377 "Failed to allocate memory for NVKMS memory object on "
1378 "NvKmsKapiDevice 0x%p",
1379 device);
1380 return NULL;
1381 }
1382
1383 /* Generate RM handle for memory object */
1384
1385 *handleOut = nvKmsKapiGenerateRmHandle(device);
1386
1387 if (*handleOut == 0x0) {
1388 nvKmsKapiLogDeviceDebug(
1389 device,
1390 "Failed to allocate RM handle for memory");
1391 nvKmsKapiFree(memory);
1392 return NULL;
1393 }
1394
1395 return memory;
1396 }
1397
1398 static struct NvKmsKapiMemory* AllocateVideoMemory
1399 (
1400 struct NvKmsKapiDevice *device,
1401 enum NvKmsSurfaceMemoryLayout layout,
1402 enum NvKmsKapiAllocationType type,
1403 NvU64 size,
1404 NvU8 *compressible
1405 )
1406 {
1407 struct NvKmsKapiMemory *memory = NULL;
1408 NvU32 hRmHandle;
1409
1410 memory = AllocMemoryObjectAndHandle(device, &hRmHandle);
1411
1412 if (!memory) {
1413 return NULL;
1414 }
1415
1416 if (!nvKmsKapiAllocateVideoMemory(device,
1417 hRmHandle,
1418 layout,
1419 size,
1420 type,
1421 compressible)) {
1422 nvKmsKapiFreeRmHandle(device, hRmHandle);
1423 FreeMemory(device, memory);
1424 return NULL;
1425 }
1426
1427 memory->hRmHandle = hRmHandle;
1428 memory->size = size;
1429 memory->surfaceParams.layout = layout;
1430
1431 if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
1432 memory->surfaceParams.blockLinear.genericMemory = NV_TRUE;
1433 }
1434
1435 return memory;
1436 }
1437
1438 static struct NvKmsKapiMemory* AllocateSystemMemory
1439 (
1440 struct NvKmsKapiDevice *device,
1441 enum NvKmsSurfaceMemoryLayout layout,
1442 enum NvKmsKapiAllocationType type,
1443 NvU64 size,
1444 NvU8 *compressible
1445 )
1446 {
1447 struct NvKmsKapiMemory *memory = NULL;
1448 NvU32 hRmHandle;
1449
1450 memory = AllocMemoryObjectAndHandle(device, &hRmHandle);
1451
1452 if (!memory) {
1453 return NULL;
1454 }
1455
1456 if (!nvKmsKapiAllocateSystemMemory(device,
1457 hRmHandle,
1458 layout,
1459 size,
1460 type,
1461 compressible)) {
1462 nvKmsKapiFreeRmHandle(device, hRmHandle);
1463 FreeMemory(device, memory);
1464 return NULL;
1465 }
1466
1467 memory->hRmHandle = hRmHandle;
1468 memory->size = size;
1469 memory->surfaceParams.layout = layout;
1470
1471 if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
1472 memory->surfaceParams.blockLinear.genericMemory = NV_TRUE;
1473 }
1474
1475 return memory;
1476 }
1477
1478 static struct NvKmsKapiMemory* ImportMemory
1479 (
1480 struct NvKmsKapiDevice *device,
1481 NvU64 memorySize,
1482 NvU64 nvKmsParamsUser,
1483 NvU64 nvKmsParamsSize
1484 )
1485 {
1486 struct NvKmsKapiPrivImportMemoryParams nvKmsParams, *pNvKmsParams = NULL;
1487 NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS importParams = { };
1488 struct NvKmsKapiMemory *memory = NULL;
1489 NvU32 hMemory;
1490 NvU32 ret;
1491 int status;
1492
1493 /* Verify the driver-private params size and copy it in from userspace */
1494
1495 if (nvKmsParamsSize != sizeof(nvKmsParams)) {
1496 nvKmsKapiLogDebug(
1497 "NVKMS private memory import parameter size mismatch - "
1498 "expected: 0x%llx, caller specified: 0x%llx",
1499 (NvU64)sizeof(nvKmsParams), nvKmsParamsSize);
1500 return NULL;
1501 }
1502
1503 /*
1504 * Use a heap allocation as the destination pointer passed to
1505 * nvkms_copyin; stack allocations created within core NVKMS may not
1506 * be recognizable to the Linux kernel's CONFIG_HARDENED_USERCOPY
1507 * checker, triggering false errors. But then save the result to a
1508 * variable on the stack, so that we can free the heap memory
1509 * immediately and not worry about its lifetime.
1510 */
1511
1512 pNvKmsParams = nvKmsKapiCalloc(1, sizeof(*pNvKmsParams));
1513
1514 if (pNvKmsParams == NULL) {
1515 nvKmsKapiLogDebug("Failed to allocate memory for ImportMemory");
1516 return NULL;
1517 }
1518
1519 status = nvkms_copyin(pNvKmsParams, nvKmsParamsUser, sizeof(*pNvKmsParams));
1520
1521 nvKmsParams = *pNvKmsParams;
1522
1523 nvKmsKapiFree(pNvKmsParams);
1524
1525 if (status != 0) {
1526 nvKmsKapiLogDebug(
1527 "NVKMS private memory import parameters could not be read from "
1528 "userspace");
1529 return NULL;
1530 }
1531
1532 memory = AllocMemoryObjectAndHandle(device, &hMemory);
1533
1534 if (!memory) {
1535 return NULL;
1536 }
1537
1538 importParams.fd = nvKmsParams.memFd;
1539 importParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
1540 importParams.object.data.rmObject.hDevice = device->hRmDevice;
1541 importParams.object.data.rmObject.hParent = device->hRmDevice;
1542 importParams.object.data.rmObject.hObject = hMemory;
1543
1544 ret = nvRmApiControl(device->hRmClient,
1545 device->hRmClient,
1546 NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD,
1547 &importParams,
1548 sizeof(importParams));
1549
1550 if (ret != NVOS_STATUS_SUCCESS) {
1551 nvKmsKapiLogDeviceDebug(
1552 device,
1553 "Failed to import RM memory object (%d) of size %llu bytes",
1554 nvKmsParams.memFd, memorySize);
1555
1556 nvKmsKapiFreeRmHandle(device, hMemory);
1557 goto failed;
1558 }
1559
1560 memory->hRmHandle = hMemory;
1561 memory->size = memorySize;
1562 memory->surfaceParams = nvKmsParams.surfaceParams;
1563
1564 return memory;
1565
1566 failed:
1567
1568 FreeMemory(device, memory);
1569
1570 return NULL;
1571 }
1572
1573 static struct NvKmsKapiMemory* DupMemory
1574 (
1575 struct NvKmsKapiDevice *device,
1576 const struct NvKmsKapiDevice *srcDevice,
1577 const struct NvKmsKapiMemory *srcMemory
1578 )
1579 {
1580 struct NvKmsKapiMemory *memory;
1581 NvU32 hMemory;
1582 NvU32 ret;
1583
1584 memory = AllocMemoryObjectAndHandle(device, &hMemory);
1585
1586 if (!memory) {
1587 return NULL;
1588 }
1589
1590 ret = nvRmApiDupObject(device->hRmClient,
1591 device->hRmDevice,
1592 hMemory,
1593 srcDevice->hRmClient,
1594 srcMemory->hRmHandle,
1595 0);
1596
1597 if (ret != NVOS_STATUS_SUCCESS) {
1598 nvKmsKapiLogDeviceDebug(
1599 device,
1600 "Failed to dup NVKMS memory object 0x%p (0x%08x, 0x%08x) "
1601 "of size %llu bytes",
1602 srcMemory, srcDevice->hRmClient, srcMemory->hRmHandle,
1603 srcMemory->size);
1604
1605 nvKmsKapiFreeRmHandle(device, hMemory);
1606 goto failed;
1607 }
1608
1609 memory->hRmHandle = hMemory;
1610 memory->size = srcMemory->size;
1611 memory->surfaceParams = srcMemory->surfaceParams;
1612
1613 return memory;
1614
1615 failed:
1616 FreeMemory(device, memory);
1617
1618 return NULL;
1619 }
1620
1621 static NvBool ExportMemory
1622 (
1623 const struct NvKmsKapiDevice *device,
1624 const struct NvKmsKapiMemory *memory,
1625 NvU64 nvKmsParamsUser,
1626 NvU64 nvKmsParamsSize
1627 )
1628 {
1629 struct NvKmsKapiPrivExportMemoryParams nvKmsParams, *pNvKmsParams = NULL;
1630 NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { };
1631 int status;
1632 NvU32 ret;
1633
1634 if (device == NULL || memory == NULL) {
1635 nvKmsKapiLogDebug(
1636 "Invalid device or memory parameter while exporting memory");
1637 return NV_FALSE;
1638 }
1639
1640 /* Verify the driver-private params size and copy it in from userspace */
1641
1642 if (nvKmsParamsSize != sizeof(nvKmsParams)) {
1643 nvKmsKapiLogDebug(
1644 "NVKMS private memory export parameter size mismatch - "
1645 "expected: 0x%llx, caller specified: 0x%llx",
1646 (NvU64)sizeof(nvKmsParams), nvKmsParamsSize);
1647 return NV_FALSE;
1648 }
1649
1650 /*
1651 * Use a heap allocation as the destination pointer passed to
1652 * nvkms_copyin; stack allocations created within core NVKMS may not
1653 * be recognizable to the Linux kernel's CONFIG_HARDENED_USERCOPY
1654 * checker, triggering false errors. But then save the result to a
1655 * variable on the stack, so that we can free the heap memory
1656 * immediately and not worry about its lifetime.
1657 */
1658
1659 pNvKmsParams = nvKmsKapiCalloc(1, sizeof(*pNvKmsParams));
1660
1661 if (pNvKmsParams == NULL) {
1662 nvKmsKapiLogDebug("Failed to allocate scratch memory for ExportMemory");
1663 return NV_FALSE;
1664 }
1665
1666 status = nvkms_copyin(pNvKmsParams, nvKmsParamsUser, sizeof(*pNvKmsParams));
1667
1668 nvKmsParams = *pNvKmsParams;
1669 nvKmsKapiFree(pNvKmsParams);
1670
1671 if (status != 0) {
1672 nvKmsKapiLogDebug(
1673 "NVKMS private memory export parameters could not be read from "
1674 "userspace");
1675 return NV_FALSE;
1676 }
1677
1678 exportParams.fd = nvKmsParams.memFd;
1679 exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
1680 exportParams.object.data.rmObject.hDevice = device->hRmDevice;
1681 exportParams.object.data.rmObject.hParent = device->hRmDevice;
1682 exportParams.object.data.rmObject.hObject = memory->hRmHandle;
1683
1684 ret = nvRmApiControl(device->hRmClient,
1685 device->hRmClient,
1686 NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD,
1687 &exportParams,
1688 sizeof(exportParams));
1689
1690 if (ret != NVOS_STATUS_SUCCESS) {
1691 nvKmsKapiLogDeviceDebug(
1692 device,
1693 "Failed to export RM memory object of size %llu bytes "
1694 "to (%d)", memory->size, nvKmsParams.memFd);
1695 return NV_FALSE;
1696 }
1697
1698 return NV_TRUE;
1699 }
1700
1701 static struct NvKmsKapiMemory*
1702 GetSystemMemoryHandleFromDmaBufSgtHelper(struct NvKmsKapiDevice *device,
1703 NvU32 descriptorType,
1704 NvP64 descriptor,
1705 NvU32 limit)
1706 {
1707 NvU32 ret;
1708 NV_OS_DESC_MEMORY_ALLOCATION_PARAMS memAllocParams = {0};
1709 struct NvKmsKapiMemory *memory = NULL;
1710 NvU32 hRmHandle;
1711
1712 memory = AllocMemoryObjectAndHandle(device, &hRmHandle);
1713
1714 if (!memory) {
1715 return NULL;
1716 }
1717
1718 memAllocParams.type = NVOS32_TYPE_PRIMARY;
1719 memAllocParams.descriptorType = descriptorType;
1720 memAllocParams.descriptor = descriptor;
1721 memAllocParams.limit = limit;
1722
1723 memAllocParams.attr =
1724 FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, memAllocParams.attr);
1725
1726 memAllocParams.attr2 =
1727 FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, memAllocParams.attr2);
1728
1729 /* dmabuf import is currently only used for ISO memory. */
1730 if (!device->isoIOCoherencyModes.coherent) {
1731 memAllocParams.attr =
1732 FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE,
1733 memAllocParams.attr);
1734 } else {
1735 memAllocParams.attr =
1736 FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK,
1737 memAllocParams.attr);
1738 }
1739
1740 ret = nvRmApiAlloc(device->hRmClient,
1741 device->hRmDevice,
1742 hRmHandle,
1743 NV01_MEMORY_SYSTEM_OS_DESCRIPTOR,
1744 &memAllocParams);
1745 if (ret != NVOS_STATUS_SUCCESS) {
1746 nvKmsKapiLogDeviceDebug(
1747 device,
1748 "nvRmApiAlloc failed with error code 0x%08x",
1749 ret);
1750 nvKmsKapiFreeRmHandle(device, hRmHandle);
1751 FreeMemory(device, memory);
1752 return NULL;
1753 }
1754
1755 memory->hRmHandle = hRmHandle;
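/* The RM 'limit' is the offset of the last byte, so the size is limit + 1. */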
1756 memory->size = limit + 1;
1757 memory->surfaceParams.layout = NvKmsSurfaceMemoryLayoutPitch;
1758
1759 return memory;
1760 }
1761
1762 static struct NvKmsKapiMemory*
1763 GetSystemMemoryHandleFromSgt(struct NvKmsKapiDevice *device,
1764 NvP64 sgt,
1765 NvP64 gem,
1766 NvU32 limit)
1767 {
1768 NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS params = {
1769 .sgt = sgt,
1770 .gem = gem
1771 };
1772
1773 return GetSystemMemoryHandleFromDmaBufSgtHelper(
1774 device, NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR, &params, limit);
1775 }
1776
1777 static struct NvKmsKapiMemory*
1778 GetSystemMemoryHandleFromDmaBuf(struct NvKmsKapiDevice *device,
1779 NvP64 dmaBuf,
1780 NvU32 limit)
1781 {
1782 return GetSystemMemoryHandleFromDmaBufSgtHelper(
1783 device, NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR, dmaBuf, limit);
1784 }
1785
1786 static NvBool RmGc6BlockerRefCntAction(const struct NvKmsKapiDevice *device,
1787 NvU32 action)
1788 {
1789 NV_STATUS status;
1790 NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params = { 0 };
1791
1792 nvAssert((action == NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC) ||
1793 (action == NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC));
1794
1795 params.action = action;
1796
1797 status = nvRmApiControl(device->hRmClient,
1798 device->hRmSubDevice,
1799 NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT,
1800 &params,
1801 sizeof(params));
1802 if (status != NV_OK) {
1803 nvKmsKapiLogDeviceDebug(
1804 device,
1805 "Failed to modify GC6 blocker refcount for 0x%x, status: 0x%x",
1806 device->hRmSubDevice, status);
1807 return NV_FALSE;
1808 }
1809
1810 return NV_TRUE;
1811 }
1812
1813 static NvBool RmGc6BlockerRefCntInc(const struct NvKmsKapiDevice *device)
1814 {
1815 return RmGc6BlockerRefCntAction(
1816 device,
1817 NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC);
1818 }
1819
1820 static NvBool RmGc6BlockerRefCntDec(const struct NvKmsKapiDevice *device)
1821 {
1822 return RmGc6BlockerRefCntAction(
1823 device,
1824 NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC);
1825 }
1826
1827 static NvBool GetMemoryPages
1828 (
1829 const struct NvKmsKapiDevice *device,
1830 const struct NvKmsKapiMemory *memory,
1831 NvU64 **pPages,
1832 NvU32 *pNumPages
1833 )
1834 {
1835 NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS paramsGetNumPages = {};
1836 NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS paramsGetPages = {};
1837 NvU64 *pages;
1838 NV_STATUS status;
1839
1840 if (device == NULL || memory == NULL) {
1841 return NV_FALSE;
1842 }
1843
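/*
 * Query the number of physical pages first, then fetch the page list into
 * a caller-owned array; the caller releases it with FreeMemoryPages().
 */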
1844 status = nvRmApiControl(device->hRmClient,
1845 memory->hRmHandle,
1846 NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES,
1847 &paramsGetNumPages,
1848 sizeof(paramsGetNumPages));
1849 if (status != NV_OK) {
1850 nvKmsKapiLogDeviceDebug(device,
1851 "Failed to get number of physical allocation pages for RM"
1852 "memory object 0x%x", memory->hRmHandle);
1853 return NV_FALSE;
1854 }
1855
1856 if (!paramsGetNumPages.numPages) {
1857 return NV_FALSE;
1858 }
1859
1860 pages = nvKmsKapiCalloc(paramsGetNumPages.numPages, sizeof(*pages));
1861 if (!pages) {
1862 nvKmsKapiLogDeviceDebug(device, "Failed to allocate memory");
1863 return NV_FALSE;
1864 }
1865
1866 paramsGetPages.pPages = NV_PTR_TO_NvP64(pages);
1867 paramsGetPages.numPages = paramsGetNumPages.numPages;
1868
1869 status = nvRmApiControl(device->hRmClient,
1870 memory->hRmHandle,
1871 NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES,
1872 &paramsGetPages,
1873 sizeof(paramsGetPages));
1874 if (status != NV_OK) {
1875 nvKmsKapiFree(pages);
1876 nvKmsKapiLogDeviceDebug(device,
1877 "Failed to get physical allocation pages for RM"
1878 "memory object 0x%x", memory->hRmHandle);
1879 return NV_FALSE;
1880 }
1881
1882 nvAssert(paramsGetPages.numPages == paramsGetNumPages.numPages);
1883
1884 *pPages = pages;
1885 *pNumPages = paramsGetPages.numPages;
1886
1887 return NV_TRUE;
1888 }
1889
1890 /*
1891  * Check whether the memory backing this framebuffer is valid for display.
1892  * We cannot scan out sysmem or compressed buffers.
1893  *
1894  * If we cannot use this memory for display, it may be resident in sysmem
1895  * or may belong to another GPU.
1896  */
1897 static NvBool IsMemoryValidForDisplay
1898 (
1899 const struct NvKmsKapiDevice *device,
1900 const struct NvKmsKapiMemory *memory
1901 )
1902 {
1903 NV_STATUS status;
1904 NV0041_CTRL_SURFACE_INFO surfaceInfo = {};
1905 NV0041_CTRL_GET_SURFACE_INFO_PARAMS surfaceInfoParams = {};
1906
1907 if (device == NULL || memory == NULL) {
1908 return NV_FALSE;
1909 }
1910
1911     /*
1912      * Don't do these checks on Tegra, which has different capabilities.
1913      * Here we always report that display is possible, so we never fail
1914      * framebuffer creation.
1915      */
1916 if (device->isSOC) {
1917 return NV_TRUE;
1918 }
1919
1920 /* Get the type of address space this memory is in, i.e. vidmem or sysmem */
1921 surfaceInfo.index = NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE;
1922
1923 surfaceInfoParams.surfaceInfoListSize = 1;
1924 surfaceInfoParams.surfaceInfoList = (NvP64)&surfaceInfo;
1925
1926 status = nvRmApiControl(device->hRmClient,
1927 memory->hRmHandle,
1928 NV0041_CTRL_CMD_GET_SURFACE_INFO,
1929 &surfaceInfoParams,
1930 sizeof(surfaceInfoParams));
1931 if (status != NV_OK) {
1932 nvKmsKapiLogDeviceDebug(device,
1933 "Failed to get memory location of RM memory object 0x%x",
1934 memory->hRmHandle);
1935 return NV_FALSE;
1936 }
1937
1938 return surfaceInfo.data == NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM;
1939 }
1940
1941 static void FreeMemoryPages
1942 (
1943 NvU64 *pPages
1944 )
1945 {
1946 nvKmsKapiFree(pPages);
1947 }
1948
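/*
 * Map an RM memory object into CPU address space. For usermode mappings the
 * GC6 blocker refcount is incremented first so that the GPU cannot enter GC6
 * while the mapping exists; UnmapMemory() drops that reference again. Kernel
 * mappings skip the blocker and rely on the caller to avoid accesses while
 * the GPU is in GC6.
 */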
1949 static NvBool MapMemory
1950 (
1951 const struct NvKmsKapiDevice *device,
1952 const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
1953 void **ppLinearAddress
1954 )
1955 {
1956 NV_STATUS status;
1957 NvU32 flags = 0;
1958
1959 if (device == NULL || memory == NULL) {
1960 return NV_FALSE;
1961 }
1962
1963 switch (type) {
1964 case NVKMS_KAPI_MAPPING_TYPE_USER:
1965 /*
1966 * Usermode clients can't be trusted not to access mappings while
1967 * the GPU is in GC6.
1968 *
1969 * TODO: Revoke/restore mappings rather than blocking GC6
1970 */
1971 if (!RmGc6BlockerRefCntInc(device)) {
1972 return NV_FALSE;
1973 }
1974 flags |= DRF_DEF(OS33, _FLAGS, _MEM_SPACE, _USER);
1975 break;
1976 case NVKMS_KAPI_MAPPING_TYPE_KERNEL:
1977 /*
1978 * Kernel clients should ensure on their own that the GPU isn't in
1979 * GC6 before making accesses to mapped vidmem surfaces.
1980 */
1981 break;
1982 }
1983
1984 status = nvRmApiMapMemory(
1985 device->hRmClient,
1986 device->hRmSubDevice,
1987 memory->hRmHandle,
1988 0,
1989 memory->size,
1990 ppLinearAddress,
1991 flags);
1992
1993 if (status != NV_OK) {
1994 nvKmsKapiLogDeviceDebug(
1995 device,
1996 "Failed to Map RM memory object 0x%x allocated for NVKMemory 0x%p",
1997 memory->hRmHandle, memory);
1998 if (type == NVKMS_KAPI_MAPPING_TYPE_USER) {
1999 RmGc6BlockerRefCntDec(device); // XXX Can't handle failure.
2000 }
2001 return NV_FALSE;
2002 }
2003
2004 return NV_TRUE;
2005 }
2006
2007 static void UnmapMemory
2008 (
2009 const struct NvKmsKapiDevice *device,
2010 const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
2011 const void *pLinearAddress
2012 )
2013 {
2014 NV_STATUS status;
2015 NvU32 flags = 0;
2016
2017 if (device == NULL || memory == NULL) {
2018 return;
2019 }
2020
2021 switch (type) {
2022 case NVKMS_KAPI_MAPPING_TYPE_USER:
2023 flags |= DRF_DEF(OS33, _FLAGS, _MEM_SPACE, _USER);
2024 break;
2025 case NVKMS_KAPI_MAPPING_TYPE_KERNEL:
2026 break;
2027 }
2028
2029 status =
2030 nvRmApiUnmapMemory(device->hRmClient,
2031 device->hRmSubDevice,
2032 memory->hRmHandle,
2033 pLinearAddress,
2034 flags);
2035
2036 if (status != NV_OK) {
2037 nvKmsKapiLogDeviceDebug(
2038 device,
2039             "Failed to Unmap RM memory object 0x%x allocated for NVKMemory 0x%p",
2040 memory->hRmHandle, memory);
2041 }
2042
2043 if (type == NVKMS_KAPI_MAPPING_TYPE_USER) {
2044 RmGc6BlockerRefCntDec(device); // XXX Can't handle failure.
2045 }
2046 }
2047
2048 static NvBool GetSurfaceParams(
2049 struct NvKmsKapiCreateSurfaceParams *params,
2050 NvU32 *pNumPlanes,
2051 enum NvKmsSurfaceMemoryLayout *pLayout,
2052 NvU32 *pLog2GobsPerBlockY,
2053 NvU32 pitch[])
2054 {
2055 const NvKmsSurfaceMemoryFormatInfo *pFormatInfo =
2056 nvKmsGetSurfaceMemoryFormatInfo(params->format);
2057 enum NvKmsSurfaceMemoryLayout layout = NvKmsSurfaceMemoryLayoutPitch;
2058 NvU32 log2GobsPerBlockY = 0;
2059 NvU32 i;
2060
2061 if (pFormatInfo->numPlanes == 0)
2062 {
2063 nvKmsKapiLogDebug("Unknown surface format");
2064 return NV_FALSE;
2065 }
2066
2067 for (i = 0; i < pFormatInfo->numPlanes; i++) {
2068 struct NvKmsKapiMemory *memory =
2069 params->planes[i].memory;
2070
2071 if (memory == NULL) {
2072             return NV_FALSE;
2073 }
2074
2075 pitch[i] = params->planes[i].pitch;
2076
2077 if (i == 0) {
2078 if (params->explicit_layout) {
2079 layout = params->layout;
2080 } else {
2081 layout = memory->surfaceParams.layout;
2082 }
2083
2084 switch (layout) {
2085 case NvKmsSurfaceMemoryLayoutBlockLinear:
2086 if (params->explicit_layout) {
2087 log2GobsPerBlockY = params->log2GobsPerBlockY;
2088 } else {
2089 log2GobsPerBlockY =
2090 memory->surfaceParams.blockLinear.log2GobsPerBlock.y;
2091 }
2092 break;
2093
2094 case NvKmsSurfaceMemoryLayoutPitch:
2095 log2GobsPerBlockY = 0;
2096 break;
2097
2098 default:
2099 nvKmsKapiLogDebug("Invalid surface layout: %u", layout);
2100 return NV_FALSE;
2101 }
2102 } else {
2103 if (!params->explicit_layout) {
2104 if (layout != memory->surfaceParams.layout) {
2105                     nvKmsKapiLogDebug("All planes do not have the same layout");
2106                     return NV_FALSE;
2107 }
2108
2109 if (layout == NvKmsSurfaceMemoryLayoutBlockLinear &&
2110 log2GobsPerBlockY !=
2111 memory->surfaceParams.blockLinear.log2GobsPerBlock.y) {
2112
2113 nvKmsKapiLogDebug(
2114 "All planes do not have the same blocklinear parameters");
2115                     return NV_FALSE;
2116 }
2117 }
2118 }
2119
2120 if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
2121 if (params->explicit_layout) {
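                /*
                 * An explicit block-linear pitch is given in bytes, but NVKMS
                 * expects it in units of 64-byte blocks: a 256-byte pitch,
                 * for example, becomes 256 >> 6 = 4. Pitches that are not a
                 * multiple of 64 are rejected.
                 */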
2122 if (pitch[i] & 63) {
2123 nvKmsKapiLogDebug(
2124 "Invalid block-linear pitch alignment: %u", pitch[i]);
2125 return NV_FALSE;
2126 }
2127
2128 pitch[i] = pitch[i] >> 6;
2129 } else {
2130 /*
2131 * The caller (nvidia-drm) is not blocklinear-aware, so the
2132 * passed-in pitch cannot accurately reflect block information.
2133 * Override the pitch with what was specified when the surface
2134 * was imported.
2135 */
2136 pitch[i] = memory->surfaceParams.blockLinear.pitchInBlocks;
2137 }
2138 } else {
2139 pitch[i] = params->planes[i].pitch;
2140 }
2141
2142 }
2143
2144 *pNumPlanes = pFormatInfo->numPlanes;
2145 *pLayout = layout;
2146 *pLog2GobsPerBlockY = log2GobsPerBlockY;
2147
2148 return NV_TRUE;
2149 }
2150 static struct NvKmsKapiSurface* CreateSurface
2151 (
2152 struct NvKmsKapiDevice *device,
2153 struct NvKmsKapiCreateSurfaceParams *params
2154 )
2155 {
2156 struct NvKmsRegisterSurfaceParams paramsReg = { };
2157 NvBool status;
2158
2159 struct NvKmsKapiSurface *surface = NULL;
2160
2161 enum NvKmsSurfaceMemoryLayout layout = NvKmsSurfaceMemoryLayoutPitch;
2162 NvU32 log2GobsPerBlockY = 0;
2163 NvU32 numPlanes = 0;
2164 NvU32 pitch[NVKMS_MAX_PLANES_PER_SURFACE] = { 0 };
2165 NvU32 i;
2166
2167 if (!GetSurfaceParams(params,
2168 &numPlanes,
2169 &layout,
2170 &log2GobsPerBlockY,
2171 pitch))
2172 {
2173 goto failed;
2174 }
2175
2176 surface = nvKmsKapiCalloc(1, sizeof(*surface));
2177
2178 if (surface == NULL) {
2179 nvKmsKapiLogDebug(
2180 "Failed to allocate memory for NVKMS surface object on "
2181 "NvKmsKapiDevice 0x%p",
2182 device);
2183 goto failed;
2184 }
2185
2186 if (device->hKmsDevice == 0x0) {
2187 goto done;
2188 }
2189
2190 /* Create NVKMS surface */
2191
2192 paramsReg.request.deviceHandle = device->hKmsDevice;
2193
2194 paramsReg.request.useFd = FALSE;
2195 paramsReg.request.rmClient = device->hRmClient;
2196
2197 paramsReg.request.widthInPixels = params->width;
2198 paramsReg.request.heightInPixels = params->height;
2199
2200 paramsReg.request.format = params->format;
2201
2202 paramsReg.request.layout = layout;
2203 paramsReg.request.log2GobsPerBlockY = log2GobsPerBlockY;
2204
2205 for (i = 0; i < numPlanes; i++) {
2206 struct NvKmsKapiMemory *memory =
2207 params->planes[i].memory;
2208
2209 paramsReg.request.planes[i].u.rmObject = memory->hRmHandle;
2210 paramsReg.request.planes[i].rmObjectSizeInBytes = memory->size;
2211 paramsReg.request.planes[i].offset = params->planes[i].offset;
2212 paramsReg.request.planes[i].pitch = pitch[i];
2213 }
2214
2215 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
2216 NVKMS_IOCTL_REGISTER_SURFACE,
2217                                    &paramsReg, sizeof(paramsReg));
2218 if (!status) {
2219 nvKmsKapiLogDeviceDebug(
2220 device,
2221 "Failed to register NVKMS surface of dimensions %ux%u "
2222 "and %s format",
2223 params->width,
2224 params->height,
2225 nvKmsSurfaceMemoryFormatToString(params->format));
2226
2227 goto failed;
2228 }
2229
2230 surface->hKmsHandle = paramsReg.reply.surfaceHandle;
2231
2232 done:
2233 return surface;
2234
2235 failed:
2236 nvKmsKapiFree(surface);
2237
2238 return NULL;
2239 }
2240
2241 static void DestroySurface
2242 (
2243 struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface
2244 )
2245 {
2246 struct NvKmsUnregisterSurfaceParams paramsUnreg = { };
2247 NvBool status;
2248
2249 if (device->hKmsDevice == 0x0) {
2250 goto done;
2251 }
2252
2253 paramsUnreg.request.deviceHandle = device->hKmsDevice;
2254 paramsUnreg.request.surfaceHandle = surface->hKmsHandle;
2255
2256 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
2257 NVKMS_IOCTL_UNREGISTER_SURFACE,
2258                                    &paramsUnreg, sizeof(paramsUnreg));
2259
2260 if (!status) {
2261 nvKmsKapiLogDeviceDebug(
2262 device,
2263 "Failed to unregister NVKMS surface registered for "
2264 "NvKmsKapiSurface 0x%p",
2265 surface);
2266 }
2267
2268 done:
2269 nvKmsKapiFree(surface);
2270 }
2271
2272 /*
2273 * Helper function to convert NvKmsMode to NvKmsKapiDisplayMode.
2274 */
2275 static void NvKmsModeToKapi
2276 (
2277 const struct NvKmsMode *kmsMode,
2278 struct NvKmsKapiDisplayMode *mode
2279 )
2280 {
2281 const NvModeTimings *timings = &kmsMode->timings;
2282
2283 nvkms_memset(mode, 0, sizeof(*mode));
2284
2285 mode->timings.refreshRate = timings->RRx1k;
2286 mode->timings.pixelClockHz = timings->pixelClockHz;
2287 mode->timings.hVisible = timings->hVisible;
2288 mode->timings.hSyncStart = timings->hSyncStart;
2289 mode->timings.hSyncEnd = timings->hSyncEnd;
2290 mode->timings.hTotal = timings->hTotal;
2291 mode->timings.hSkew = timings->hSkew;
2292 mode->timings.vVisible = timings->vVisible;
2293 mode->timings.vSyncStart = timings->vSyncStart;
2294 mode->timings.vSyncEnd = timings->vSyncEnd;
2295 mode->timings.vTotal = timings->vTotal;
2296
2297 mode->timings.flags.interlaced = timings->interlaced;
2298 mode->timings.flags.doubleScan = timings->doubleScan;
2299 mode->timings.flags.hSyncPos = timings->hSyncPos;
2300 mode->timings.flags.hSyncNeg = timings->hSyncNeg;
2301 mode->timings.flags.vSyncPos = timings->vSyncPos;
2302 mode->timings.flags.vSyncNeg = timings->vSyncNeg;
2303
2304 mode->timings.widthMM = timings->sizeMM.w;
2305 mode->timings.heightMM = timings->sizeMM.h;
2306
2307 ct_assert(sizeof(mode->name) == sizeof(kmsMode->name));
2308
2309 nvkms_memcpy(mode->name, kmsMode->name, sizeof(mode->name));
2310 }
2311
2312 static void InitNvKmsModeValidationParams(
2313 const struct NvKmsKapiDevice *device,
2314 struct NvKmsModeValidationParams *params)
2315 {
2316     /*
2317      * Mode timings structures of KAPI clients may not have a field like
2318      * RRx1k, so the RRx1k value computed during the conversion from -
2319      *     KAPI client's mode-timings structure
2320      *         -> NvKmsKapiDisplayMode -> NvModeTimings
2321      * is not guaranteed to match the value we get from the EDID; the
2322      * mismatch may cause a modeset to fail.
2323      *
2324      * The RRx1k field does not impact hardware mode timing values,
2325      * therefore override the RRx1k check.
2326      *
2327      * XXX NVKMS TODO: Bug 200156338 is filed to delete NvModeTimings::RRx1k
2328      * if possible.
2329      */
2330 params->overrides = NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK;
2331 }
2332
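/*
 * Query and validate the mode at the given index for a display, using
 * NVKMS_IOCTL_VALIDATE_MODE_INDEX. Returns -1 on failure, 1 if more mode
 * indices remain to be queried, and 0 once the last index has been reached;
 * mode, valid and preferredMode are optional out-parameters filled from the
 * ioctl reply.
 */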
2333 static int GetDisplayMode
2334 (
2335 struct NvKmsKapiDevice *device,
2336 NvKmsKapiDisplay display, NvU32 modeIndex,
2337 struct NvKmsKapiDisplayMode *mode, NvBool *valid,
2338 NvBool *preferredMode
2339 )
2340 {
2341 struct NvKmsValidateModeIndexParams paramsValidate = { };
2342 NvBool status;
2343
2344 if (device == NULL) {
2345 return -1;
2346 }
2347
2348 paramsValidate.request.deviceHandle = device->hKmsDevice;
2349 paramsValidate.request.dispHandle = device->hKmsDisp;
2350
2351 paramsValidate.request.dpyId = nvNvU32ToDpyId(display);
2352
2353 InitNvKmsModeValidationParams(device,
2354                                   &paramsValidate.request.modeValidation);
2355
2356 paramsValidate.request.modeIndex = modeIndex;
2357
2358 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
2359 NVKMS_IOCTL_VALIDATE_MODE_INDEX,
2360                                    &paramsValidate, sizeof(paramsValidate));
2361
2362 if (!status) {
2363 nvKmsKapiLogDeviceDebug(
2364 device,
2365 "Failed to get validated mode index 0x%x for NvKmsKapiDisplay 0x%08x",
2366 modeIndex, display);
2367 return -1;
2368 }
2369
2370 if (mode != NULL) {
2371         NvKmsModeToKapi(&paramsValidate.reply.mode, mode);
2372 }
2373
2374
2375 if (valid != NULL) {
2376 *valid = paramsValidate.reply.valid;
2377 }
2378
2379 if (preferredMode != NULL) {
2380 *preferredMode = paramsValidate.reply.preferredMode;
2381 }
2382
2383 return paramsValidate.reply.end ? 0 : 1;
2384 }
2385
2386 /*
2387 * Helper function to convert NvKmsKapiDisplayMode to NvKmsMode.
2388 */
2389 static void NvKmsKapiDisplayModeToKapi
2390 (
2391 const struct NvKmsKapiDisplayMode *mode,
2392 struct NvKmsMode *kmsMode
2393 )
2394 {
2395 NvModeTimings *timings = &kmsMode->timings;
2396
2397 nvkms_memset(kmsMode, 0, sizeof(*kmsMode));
2398
2399 nvkms_memcpy(kmsMode->name, mode->name, sizeof(mode->name));
2400
2401 timings->RRx1k = mode->timings.refreshRate;
2402 timings->pixelClockHz = mode->timings.pixelClockHz;
2403 timings->hVisible = mode->timings.hVisible;
2404 timings->hSyncStart = mode->timings.hSyncStart;
2405 timings->hSyncEnd = mode->timings.hSyncEnd;
2406 timings->hTotal = mode->timings.hTotal;
2407 timings->hSkew = mode->timings.hSkew;
2408 timings->vVisible = mode->timings.vVisible;
2409 timings->vSyncStart = mode->timings.vSyncStart;
2410 timings->vSyncEnd = mode->timings.vSyncEnd;
2411 timings->vTotal = mode->timings.vTotal;
2412
2413 timings->interlaced = mode->timings.flags.interlaced;
2414 timings->doubleScan = mode->timings.flags.doubleScan;
2415 timings->hSyncPos = mode->timings.flags.hSyncPos;
2416 timings->hSyncNeg = mode->timings.flags.hSyncNeg;
2417 timings->vSyncPos = mode->timings.flags.vSyncPos;
2418 timings->vSyncNeg = mode->timings.flags.vSyncNeg;
2419
2420 timings->sizeMM.w = mode->timings.widthMM;
2421 timings->sizeMM.h = mode->timings.heightMM;
2422 }
2423
2424 static NvBool ValidateDisplayMode
2425 (
2426 struct NvKmsKapiDevice *device,
2427 NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode
2428 )
2429 {
2430 struct NvKmsValidateModeParams paramsValidate;
2431 NvBool status;
2432
2433 if (device == NULL) {
2434 return NV_FALSE;
2435 }
2436
2437     nvkms_memset(&paramsValidate, 0, sizeof(paramsValidate));
2438
2439 paramsValidate.request.deviceHandle = device->hKmsDevice;
2440 paramsValidate.request.dispHandle = device->hKmsDisp;
2441
2442 paramsValidate.request.dpyId = nvNvU32ToDpyId(display);
2443
2444 InitNvKmsModeValidationParams(device,
2445                                   &paramsValidate.request.modeValidation);
2446
2447
2448     NvKmsKapiDisplayModeToKapi(mode, &paramsValidate.request.mode);
2449
2450 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
2451 NVKMS_IOCTL_VALIDATE_MODE,
2452                                    &paramsValidate, sizeof(paramsValidate));
2453
2454 if (!status) {
2455 nvKmsKapiLogDeviceDebug(
2456 device,
2457 "Failed to get validated mode %ux%u@%uHz for NvKmsKapiDisplay 0x%08x of "
2458 "NvKmsKapiDevice 0x%p",
2459 mode->timings.hVisible, mode->timings.vVisible,
2460 mode->timings.refreshRate/1000, display,
2461 device);
2462 return NV_FALSE;
2463 }
2464
2465 return paramsValidate.reply.valid;
2466 }
2467
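/*
 * Translate a layer's KAPI syncpt parameters into NVKMS channel sync objects:
 * a specified pre-syncpt is passed through as a raw id/value pair, and a
 * requested post-syncpt asks NVKMS to return a syncpt fd. Fails if syncpts
 * are requested on a device that does not support them.
 */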
2468 static NvBool AssignSyncObjectConfig(
2469 struct NvKmsKapiDevice *device,
2470 const struct NvKmsKapiLayerConfig *pLayerConfig,
2471 struct NvKmsChannelSyncObjects *pSyncObject)
2472 {
2473 if (!device->supportsSyncpts) {
2474 if (pLayerConfig->syncptParams.preSyncptSpecified ||
2475 pLayerConfig->syncptParams.postSyncptRequested) {
2476 return NV_FALSE;
2477 }
2478 }
2479
2480 pSyncObject->useSyncpt = FALSE;
2481
2482 if (pLayerConfig->syncptParams.preSyncptSpecified) {
2483 pSyncObject->useSyncpt = TRUE;
2484
2485 pSyncObject->u.syncpts.pre.type = NVKMS_SYNCPT_TYPE_RAW;
2486 pSyncObject->u.syncpts.pre.u.raw.id = pLayerConfig->syncptParams.preSyncptId;
2487 pSyncObject->u.syncpts.pre.u.raw.value = pLayerConfig->syncptParams.preSyncptValue;
2488 }
2489
2490 if (pLayerConfig->syncptParams.postSyncptRequested) {
2491 pSyncObject->useSyncpt = TRUE;
2492
2493 pSyncObject->u.syncpts.requestedPostType = NVKMS_SYNCPT_TYPE_FD;
2494 }
2495 return NV_TRUE;
2496 }
2497
2498 static void AssignHDRMetadataConfig(
2499 const struct NvKmsKapiLayerConfig *layerConfig,
2500 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig,
2501 const NvU32 layer,
2502 struct NvKmsFlipCommonParams *params,
2503 NvBool bFromKmsSetMode)
2504 {
2505 params->layer[layer].hdr.specified =
2506 bFromKmsSetMode || layerRequestedConfig->flags.hdrMetadataChanged;
2507 params->layer[layer].hdr.enabled =
2508 layerConfig->hdrMetadata.enabled;
2509 if (layerConfig->hdrMetadata.enabled) {
2510 params->layer[layer].hdr.staticMetadata =
2511 layerConfig->hdrMetadata.val;
2512 }
2513 }
2514
2515 static void NvKmsKapiCursorConfigToKms(
2516 const struct NvKmsKapiCursorRequestedConfig *requestedConfig,
2517 struct NvKmsFlipCommonParams *params,
2518 NvBool bFromKmsSetMode)
2519 {
2520 if (requestedConfig->flags.surfaceChanged || bFromKmsSetMode) {
2521 params->cursor.imageSpecified = NV_TRUE;
2522
2523 if (requestedConfig->surface != NULL) {
2524 params->cursor.image.surfaceHandle[NVKMS_LEFT] =
2525 requestedConfig->surface->hKmsHandle;
2526 }
2527
2528 params->cursor.image.cursorCompParams.colorKeySelect =
2529 NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE;
2530 params->cursor.image.cursorCompParams.blendingMode[1] =
2531 requestedConfig->compParams.compMode;
2532 params->cursor.image.cursorCompParams.surfaceAlpha =
2533 requestedConfig->compParams.surfaceAlpha;
2534 }
2535
2536 if (requestedConfig->flags.dstXYChanged || bFromKmsSetMode) {
2537 params->cursor.position.x = requestedConfig->dstX;
2538 params->cursor.position.y = requestedConfig->dstY;
2539
2540 params->cursor.positionSpecified = NV_TRUE;
2541 }
2542 }
2543
2544 static NvBool NvKmsKapiOverlayLayerConfigToKms(
2545 struct NvKmsKapiDevice *device,
2546 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig,
2547 const NvU32 layer,
2548 const NvU32 head,
2549 struct NvKmsFlipCommonParams *params,
2550 NvBool commit,
2551 NvBool bFromKmsSetMode)
2552 {
2553 NvBool ret = NV_FALSE;
2554 const struct NvKmsKapiLayerConfig *layerConfig =
2555 &layerRequestedConfig->config;
2556
2557 if (layerRequestedConfig->flags.surfaceChanged || bFromKmsSetMode) {
2558 params->layer[layer].syncObjects.specified = NV_TRUE;
2559 params->layer[layer].completionNotifier.specified = NV_TRUE;
2560 params->layer[layer].surface.specified = NV_TRUE;
2561
2562 if (layerConfig->surface != NULL) {
2563 params->layer[layer].surface.handle[NVKMS_LEFT] =
2564 layerConfig->surface->hKmsHandle;
2565 }
2566
2567 params->layer[layer].surface.rrParams =
2568 layerConfig->rrParams;
2569
2570 params->layer[layer].compositionParams.val.colorKeySelect =
2571 NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE;
2572 params->layer[layer].compositionParams.val.blendingMode[1] =
2573 layerConfig->compParams.compMode;
2574 params->layer[layer].compositionParams.val.surfaceAlpha =
2575 layerConfig->compParams.surfaceAlpha;
2576 params->layer[layer].compositionParams.specified = TRUE;
2577 params->layer[layer].minPresentInterval =
2578 layerConfig->minPresentInterval;
2579
2580 params->layer[layer].colorSpace.val = layerConfig->inputColorSpace;
2581 params->layer[layer].colorSpace.specified = TRUE;
2582 }
2583
2584 if (layerRequestedConfig->flags.cscChanged) {
2585 params->layer[layer].csc.specified = NV_TRUE;
2586 params->layer[layer].csc.useMain = layerConfig->cscUseMain;
2587 if (!layerConfig->cscUseMain) {
2588 params->layer[layer].csc.matrix = layerConfig->csc;
2589 }
2590 }
2591
2592 if (layerRequestedConfig->flags.srcWHChanged || bFromKmsSetMode) {
2593 params->layer[layer].sizeIn.val.width = layerConfig->srcWidth;
2594 params->layer[layer].sizeIn.val.height = layerConfig->srcHeight;
2595 params->layer[layer].sizeIn.specified = TRUE;
2596 }
2597
2598 if (layerRequestedConfig->flags.dstWHChanged || bFromKmsSetMode) {
2599 params->layer[layer].sizeOut.val.width = layerConfig->dstWidth;
2600 params->layer[layer].sizeOut.val.height = layerConfig->dstHeight;
2601 params->layer[layer].sizeOut.specified = TRUE;
2602 }
2603
2604 if (layerRequestedConfig->flags.dstXYChanged || bFromKmsSetMode) {
2605 params->layer[layer].outputPosition.val.x = layerConfig->dstX;
2606 params->layer[layer].outputPosition.val.y = layerConfig->dstY;
2607
2608 params->layer[layer].outputPosition.specified = NV_TRUE;
2609 }
2610
2611 AssignHDRMetadataConfig(layerConfig, layerRequestedConfig, layer,
2612 params, bFromKmsSetMode);
2613
2614 if (commit) {
2615 NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX(
2616 device->layerState[head][layer].
2617 currFlipNotifierIndex);
2618
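        /*
         * Completion notifiers for this (head, layer) pair rotate through a
         * fixed set of slots in the device's notifier surface:
         * NVKMS_KAPI_INC_NOTIFIER_INDEX selects the next slot and
         * NVKMS_KAPI_NOTIFIER_OFFSET yields its byte offset, which NVKMS
         * consumes in 32-bit words (hence the >> 2 below).
         */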
2619 if (layerConfig->surface != NULL) {
2620 NvU32 nextIndexOffsetInBytes =
2621 NVKMS_KAPI_NOTIFIER_OFFSET(head,
2622 layer, nextIndex);
2623
2624 params->layer[layer].completionNotifier.val.
2625 surface.surfaceHandle = device->notifier.hKmsHandle;
2626
2627 params->layer[layer].completionNotifier.val.
2628 surface.format = device->notifier.format;
2629
2630 params->layer[layer].completionNotifier.val.
2631 surface.offsetInWords = nextIndexOffsetInBytes >> 2;
2632
2633 params->layer[layer].completionNotifier.val.awaken = NV_TRUE;
2634 }
2635
2636 ret = AssignSyncObjectConfig(device,
2637 layerConfig,
2638                                      &params->layer[layer].syncObjects.val);
2639 if (ret == NV_FALSE) {
2640 return ret;
2641 }
2642
2643         /*
2644          * XXX Should this be done after the commit?
2645          * What if the commit fails?
2646          *
2647          * Commits are not expected to fail in the KAPI layer;
2648          * only validated configurations are expected
2649          * to be committed.
2650          */
2651 device->layerState[head][layer].
2652 currFlipNotifierIndex = nextIndex;
2653 }
2654
2655 return NV_TRUE;
2656 }
2657
2658 static NvBool NvKmsKapiPrimaryLayerConfigToKms(
2659 struct NvKmsKapiDevice *device,
2660 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig,
2661 const NvU32 head,
2662 struct NvKmsFlipCommonParams *params,
2663 NvBool commit,
2664 NvBool bFromKmsSetMode)
2665 {
2666 NvBool ret = NV_FALSE;
2667 const struct NvKmsKapiLayerConfig *layerConfig =
2668 &layerRequestedConfig->config;
2669
2670 NvBool changed = FALSE;
2671
2672 if (layerRequestedConfig->flags.surfaceChanged || bFromKmsSetMode) {
2673 params->layer[NVKMS_MAIN_LAYER].surface.specified = NV_TRUE;
2674 params->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = NV_TRUE;
2675 params->layer[NVKMS_MAIN_LAYER].syncObjects.specified = NV_TRUE;
2676
2677
2678 params->layer[NVKMS_MAIN_LAYER].minPresentInterval =
2679 layerConfig->minPresentInterval;
2680 params->layer[NVKMS_MAIN_LAYER].tearing = layerConfig->tearing;
2681 params->layer[NVKMS_MAIN_LAYER].surface.rrParams = layerConfig->rrParams;
2682
2683 if (layerConfig->surface != NULL) {
2684 params->layer[NVKMS_MAIN_LAYER].surface.handle[0] =
2685 layerConfig->surface->hKmsHandle;
2686
2687 if (params->layer[NVKMS_MAIN_LAYER].surface.handle[0] != 0) {
2688 params->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = layerConfig->srcWidth;
2689 params->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = layerConfig->srcHeight;
2690 params->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE;
2691
2692 params->layer[NVKMS_MAIN_LAYER].sizeOut.val.width = layerConfig->dstWidth;
2693 params->layer[NVKMS_MAIN_LAYER].sizeOut.val.height = layerConfig->dstHeight;
2694 params->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE;
2695 }
2696 }
2697
2698 params->layer[NVKMS_MAIN_LAYER].colorSpace.val = layerConfig->inputColorSpace;
2699 params->layer[NVKMS_MAIN_LAYER].colorSpace.specified = TRUE;
2700
2701 changed = TRUE;
2702 }
2703
2704 if (layerRequestedConfig->flags.srcXYChanged || bFromKmsSetMode) {
2705 params->viewPortIn.point.x = layerConfig->srcX;
2706 params->viewPortIn.point.y = layerConfig->srcY;
2707 params->viewPortIn.specified = NV_TRUE;
2708
2709 changed = TRUE;
2710 }
2711
2712 if (layerRequestedConfig->flags.cscChanged) {
2713 nvAssert(!layerConfig->cscUseMain);
2714
2715 params->layer[NVKMS_MAIN_LAYER].csc.specified = NV_TRUE;
2716 params->layer[NVKMS_MAIN_LAYER].csc.useMain = FALSE;
2717 params->layer[NVKMS_MAIN_LAYER].csc.matrix = layerConfig->csc;
2718
2719 changed = TRUE;
2720 }
2721
2722 AssignHDRMetadataConfig(layerConfig, layerRequestedConfig, NVKMS_MAIN_LAYER,
2723 params, bFromKmsSetMode);
2724
2725 if (commit && changed) {
2726 NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX(
2727 device->layerState[head][NVKMS_MAIN_LAYER].
2728 currFlipNotifierIndex);
2729
2730 if (layerConfig->surface != NULL) {
2731 NvU32 nextIndexOffsetInBytes =
2732 NVKMS_KAPI_NOTIFIER_OFFSET(head,
2733 NVKMS_MAIN_LAYER, nextIndex);
2734
2735 params->layer[NVKMS_MAIN_LAYER].completionNotifier.
2736 val.surface.surfaceHandle = device->notifier.hKmsHandle;
2737
2738 params->layer[NVKMS_MAIN_LAYER].completionNotifier.
2739 val.surface.format = device->notifier.format;
2740
2741 params->layer[NVKMS_MAIN_LAYER].completionNotifier.
2742 val.surface.offsetInWords = nextIndexOffsetInBytes >> 2;
2743
2744 params->layer[NVKMS_MAIN_LAYER].completionNotifier.val.awaken = NV_TRUE;
2745 }
2746
2747 ret = AssignSyncObjectConfig(device,
2748 layerConfig,
2749                                      &params->layer[NVKMS_MAIN_LAYER].syncObjects.val);
2750 if (ret == NV_FALSE) {
2751 return ret;
2752 }
2753
2754         /*
2755          * XXX Should this be done after the commit?
2756          * What if the commit fails?
2757          *
2758          * Commits are not expected to fail in the KAPI layer;
2759          * only validated configurations are expected
2760          * to be committed.
2761          */
2762 device->layerState[head][NVKMS_MAIN_LAYER].
2763 currFlipNotifierIndex = nextIndex;
2764 }
2765
2766 return NV_TRUE;
2767 }
2768
2769 static NvBool NvKmsKapiLayerConfigToKms(
2770 struct NvKmsKapiDevice *device,
2771 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig,
2772 const NvU32 layer,
2773 const NvU32 head,
2774 struct NvKmsFlipCommonParams *params,
2775 NvBool commit,
2776 NvBool bFromKmsSetMode)
2777 {
2778 if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX) {
2779 return NvKmsKapiPrimaryLayerConfigToKms(device,
2780 layerRequestedConfig,
2781 head,
2782 params,
2783 commit,
2784 bFromKmsSetMode);
2785
2786 }
2787
2788 return NvKmsKapiOverlayLayerConfigToKms(device,
2789 layerRequestedConfig,
2790 layer,
2791 head,
2792 params,
2793 commit,
2794 bFromKmsSetMode);
2795 }
2796
2797 static void NvKmsKapiHeadLutConfigToKms(
2798 const struct NvKmsKapiHeadModeSetConfig *modeSetConfig,
2799 struct NvKmsSetLutCommonParams *lutParams)
2800 {
2801 struct NvKmsSetInputLutParams *input = &lutParams->input;
2802 struct NvKmsSetOutputLutParams *output = &lutParams->output;
2803
2804 /* input LUT */
2805 input->specified = modeSetConfig->lut.input.specified;
2806 input->depth = modeSetConfig->lut.input.depth;
2807 input->start = modeSetConfig->lut.input.start;
2808 input->end = modeSetConfig->lut.input.end;
2809
2810 input->pRamps = nvKmsPointerToNvU64(modeSetConfig->lut.input.pRamps);
2811
2812 /* output LUT */
2813 output->specified = modeSetConfig->lut.output.specified;
2814 output->enabled = modeSetConfig->lut.output.enabled;
2815
2816 output->pRamps = nvKmsPointerToNvU64(modeSetConfig->lut.output.pRamps);
2817 }
2818
2819 static NvBool AnyLayerTransferFunctionChanged(
2820 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig)
2821 {
2822 NvU32 layer;
2823
2824 for (layer = 0;
2825 layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig);
2826 layer++) {
2827 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig =
2828 &headRequestedConfig->layerRequestedConfig[layer];
2829
2830 if (layerRequestedConfig->flags.tfChanged) {
2831 return NV_TRUE;
2832 }
2833 }
2834
2835 return NV_FALSE;
2836 }
2837
2838 static NvBool GetOutputTransferFunction(
2839 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig,
2840 enum NvKmsOutputTf *tf)
2841 {
2842 NvBool found = NV_FALSE;
2843 NvU32 layer;
2844
2845 *tf = NVKMS_OUTPUT_TF_NONE;
2846
2847 for (layer = 0;
2848 layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig);
2849 layer++) {
2850 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig =
2851 &headRequestedConfig->layerRequestedConfig[layer];
2852 const struct NvKmsKapiLayerConfig *layerConfig =
2853 &layerRequestedConfig->config;
2854
2855 if (layerConfig->hdrMetadata.enabled) {
2856 if (!found) {
2857 *tf = layerConfig->tf;
2858 found = NV_TRUE;
2859 } else if (*tf != layerConfig->tf) {
2860 nvKmsKapiLogDebug(
2861 "Output transfer function should be the same for all layers on a head");
2862 return NV_FALSE;
2863 }
2864 }
2865 }
2866
2867 return NV_TRUE;
2868 }
2869
2870 /*
2871 * Helper function to convert NvKmsKapiRequestedModeSetConfig
2872 * to NvKmsSetModeParams.
2873 */
2874 static NvBool NvKmsKapiRequestedModeSetConfigToKms(
2875 struct NvKmsKapiDevice *device,
2876 const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
2877 struct NvKmsSetModeParams *params,
2878 NvBool commit)
2879 {
2880 NvU32 dispIdx = device->dispIdx;
2881 NvU32 head;
2882
2883 nvkms_memset(params, 0, sizeof(*params));
2884
2885 params->request.commit = commit;
2886 params->request.deviceHandle = device->hKmsDevice;
2887 params->request.requestedDispsBitMask = 1 << dispIdx;
2888
2889 for (head = 0;
2890 head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) {
2891
2892 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig =
2893 &requestedConfig->headRequestedConfig[head];
2894 const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig =
2895 &headRequestedConfig->modeSetConfig;
2896 struct NvKmsSetModeOneHeadRequest *paramsHead;
2897 enum NvKmsOutputTf tf;
2898 NvU32 layer;
2899 NvU32 i;
2900
2901 if ((requestedConfig->headsMask & (1 << head)) == 0x0) {
2902 continue;
2903 }
2904
2905 params->request.disp[dispIdx].requestedHeadsBitMask |= 1 << head;
2906
2907 if (headModeSetConfig->numDisplays == 0) {
2908 continue;
2909 }
2910
2911 if (params->request.commit && !headModeSetConfig->bActive) {
2912 continue;
2913 }
2914
2915         paramsHead = &params->request.disp[dispIdx].head[head];
2916
2917 InitNvKmsModeValidationParams(device,
2918                                       &paramsHead->modeValidationParams);
2919
2920 for (i = 0; i < headModeSetConfig->numDisplays; i++) {
2921 paramsHead->dpyIdList = nvAddDpyIdToDpyIdList(
2922 nvNvU32ToDpyId(headModeSetConfig->displays[i]),
2923 paramsHead->dpyIdList);
2924 }
2925
2926         NvKmsKapiDisplayModeToKapi(&headModeSetConfig->mode, &paramsHead->mode);
2927
2928 if (headRequestedConfig->flags.lutChanged) {
2929             NvKmsKapiHeadLutConfigToKms(headModeSetConfig, &paramsHead->flip.lut);
2930 }
2931
2932 NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig,
2933                                    &paramsHead->flip,
2934 NV_TRUE /* bFromKmsSetMode */);
2935 for (layer = 0;
2936 layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig);
2937 layer++) {
2938
2939 const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig =
2940 &headRequestedConfig->layerRequestedConfig[layer];
2941
2942 if (!NvKmsKapiLayerConfigToKms(device,
2943 layerRequestedConfig,
2944 layer,
2945 head,
2946                                            &paramsHead->flip,
2947 commit,
2948 NV_TRUE /* bFromKmsSetMode */)) {
2949 return NV_FALSE;
2950 }
2951 }
2952
2953 if (!GetOutputTransferFunction(headRequestedConfig, &tf)) {
2954 return NV_FALSE;
2955 }
2956
2957 paramsHead->flip.tf.val = tf;
2958 paramsHead->flip.tf.specified = NV_TRUE;
2959
2960 paramsHead->flip.hdrInfoFrame.specified = NV_TRUE;
2961 paramsHead->flip.hdrInfoFrame.enabled =
2962 headModeSetConfig->hdrInfoFrame.enabled;
2963 if (headModeSetConfig->hdrInfoFrame.enabled) {
2964 paramsHead->flip.hdrInfoFrame.eotf =
2965 headModeSetConfig->hdrInfoFrame.eotf;
2966 paramsHead->flip.hdrInfoFrame.staticMetadata =
2967 headModeSetConfig->hdrInfoFrame.staticMetadata;
2968 }
2969
2970 paramsHead->flip.colorimetry.specified = NV_TRUE;
2971 paramsHead->flip.colorimetry.val = headModeSetConfig->colorimetry;
2972
2973 paramsHead->viewPortSizeIn.width =
2974 headModeSetConfig->mode.timings.hVisible;
2975 paramsHead->viewPortSizeIn.height =
2976 headModeSetConfig->mode.timings.vVisible;
2977
2978 if (device->caps.requiresVrrSemaphores) {
2979 paramsHead->allowGsync = NV_FALSE;
2980 paramsHead->allowAdaptiveSync = NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED;
2981 } else {
2982 paramsHead->allowGsync = NV_TRUE;
2983 paramsHead->allowAdaptiveSync = NVKMS_ALLOW_ADAPTIVE_SYNC_ALL;
2984 }
2985 }
2986
2987 return NV_TRUE;
2988 }
2989
2990
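/*
 * Apply a modeset via NVKMS_IOCTL_SET_MODE. The NvKmsSetModeParams structure
 * is heap-allocated, the requested KAPI configuration is converted with
 * NvKmsKapiRequestedModeSetConfigToKms(), the ioctl is issued, and the NVKMS
 * status is folded into replyConfig->flipResult. On failure the per-disp and
 * per-head status codes are logged for debugging.
 */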
2991 static NvBool KmsSetMode(
2992 struct NvKmsKapiDevice *device,
2993 const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
2994 struct NvKmsKapiModeSetReplyConfig *replyConfig,
2995 const NvBool commit)
2996 {
2997 struct NvKmsSetModeParams *params = NULL;
2998 NvBool status = NV_FALSE;
2999
3000 params = nvKmsKapiCalloc(1, sizeof(*params));
3001
3002 if (params == NULL) {
3003 goto done;
3004 }
3005
3006 if (!NvKmsKapiRequestedModeSetConfigToKms(device,
3007 requestedConfig,
3008 params,
3009 commit)) {
3010 goto done;
3011 }
3012
3013 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
3014 NVKMS_IOCTL_SET_MODE,
3015 params, sizeof(*params));
3016
3017 replyConfig->flipResult =
3018 (params->reply.status == NVKMS_SET_MODE_STATUS_SUCCESS) ?
3019 NV_KMS_FLIP_RESULT_SUCCESS :
3020 NV_KMS_FLIP_RESULT_INVALID_PARAMS;
3021
3022 if (!status) {
3023 nvKmsKapiLogDeviceDebug(
3024 device,
3025 "NVKMS_IOCTL_SET_MODE ioctl failed");
3026 goto done;
3027 }
3028
3029 if (params->reply.status != NVKMS_SET_MODE_STATUS_SUCCESS)
3030 {
3031 int i;
3032
3033 nvKmsKapiLogDeviceDebug(
3034 device,
3035 "NVKMS_IOCTL_SET_MODE failed! Status:\n");
3036
3037 nvKmsKapiLogDeviceDebug(
3038 device,
3039 " top-level status: %d\n", params->reply.status);
3040
3041 nvKmsKapiLogDeviceDebug(
3042 device,
3043 " disp0 status: %d\n", params->reply.disp[0].status);
3044
3045 for (i = 0; i < ARRAY_LEN(params->reply.disp[0].head); i++)
3046 {
3047 nvKmsKapiLogDeviceDebug(
3048 device,
3049 " head%d status: %d\n",
3050 i, params->reply.disp[0].head[i].status);
3051 }
3052
3053 status = NV_FALSE;
3054 }
3055
3056 done:
3057
3058 if (params != NULL) {
3059 nvKmsKapiFree(params);
3060 }
3061
3062 return status;
3063 }
3064
3065 static NvBool IsHeadConfigValid(
3066 const struct NvKmsFlipParams *params,
3067 const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
3068 const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig,
3069 NvU32 head)
3070 {
3071 if ((requestedConfig->headsMask & (1 << head)) == 0x0) {
3072 return NV_FALSE;
3073 }
3074
3075 if (headModeSetConfig->numDisplays == 0) {
3076 return NV_FALSE;
3077 }
3078
3079 if (params->request.commit && !headModeSetConfig->bActive) {
3080 return NV_FALSE;
3081 }
3082 return NV_TRUE;
3083 }
3084
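/*
 * Apply a flip via NVKMS_IOCTL_FLIP. One NvKmsFlipRequestOneHead entry is
 * built per active head, in the same allocation as the params structure;
 * after a committed flip, the per-layer post-syncpt fds from the reply are
 * copied back into replyConfig.
 */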
3085 static NvBool KmsFlip(
3086 struct NvKmsKapiDevice *device,
3087 const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
3088 struct NvKmsKapiModeSetReplyConfig *replyConfig,
3089 const NvBool commit)
3090 {
3091 struct NvKmsFlipParams *params = NULL;
3092 struct NvKmsFlipRequestOneHead *pFlipHead = NULL;
3093 NvBool status = NV_TRUE;
3094 NvU32 i, head;
3095
3096 /* Allocate space for the params structure, plus space for each possible
3097 * head. */
3098 params = nvKmsKapiCalloc(1,
3099 sizeof(*params) + sizeof(pFlipHead[0]) * NVKMS_KAPI_MAX_HEADS);
3100
3101 if (params == NULL) {
3102 return NV_FALSE;
3103 }
3104
3105 /* The flipHead array was allocated in the same block above. */
3106 pFlipHead = (struct NvKmsFlipRequestOneHead *)(params + 1);
3107
3108 params->request.deviceHandle = device->hKmsDevice;
3109 params->request.commit = commit;
3110 params->request.allowVrr = NV_FALSE;
3111 params->request.pFlipHead = nvKmsPointerToNvU64(pFlipHead);
3112 params->request.numFlipHeads = 0;
3113 for (head = 0;
3114 head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) {
3115
3116 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig =
3117 &requestedConfig->headRequestedConfig[head];
3118 const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig =
3119 &headRequestedConfig->modeSetConfig;
3120
3121 struct NvKmsFlipCommonParams *flipParams = NULL;
3122
3123 NvU32 layer;
3124
3125 if (!IsHeadConfigValid(params, requestedConfig, headModeSetConfig, head)) {
3126 continue;
3127 }
3128
3129 pFlipHead[params->request.numFlipHeads].sd = 0;
3130 pFlipHead[params->request.numFlipHeads].head = head;
3131 flipParams = &pFlipHead[params->request.numFlipHeads].flip;
3132 params->request.numFlipHeads++;
3133
3134 NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig,
3135 flipParams,
3136 NV_FALSE /* bFromKmsSetMode */);
3137
3138 for (layer = 0;
3139 layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig);
3140 layer++) {
3141
3142 const struct NvKmsKapiLayerRequestedConfig
3143 *layerRequestedConfig =
3144 &headRequestedConfig->layerRequestedConfig[layer];
3145
3146 status = NvKmsKapiLayerConfigToKms(device,
3147 layerRequestedConfig,
3148 layer,
3149 head,
3150 flipParams,
3151 commit,
3152 NV_FALSE /* bFromKmsSetMode */);
3153
3154 if (status != NV_TRUE) {
3155 goto done;
3156 }
3157 }
3158
3159 flipParams->tf.specified =
3160 AnyLayerTransferFunctionChanged(headRequestedConfig);
3161 if (flipParams->tf.specified) {
3162 enum NvKmsOutputTf tf;
3163 status = GetOutputTransferFunction(headRequestedConfig, &tf);
3164 if (status != NV_TRUE) {
3165 goto done;
3166 }
3167 flipParams->tf.val = tf;
3168 }
3169
3170 flipParams->hdrInfoFrame.specified =
3171 headRequestedConfig->flags.hdrInfoFrameChanged;
3172 if (flipParams->hdrInfoFrame.specified) {
3173 flipParams->hdrInfoFrame.enabled =
3174 headModeSetConfig->hdrInfoFrame.enabled;
3175 if (headModeSetConfig->hdrInfoFrame.enabled) {
3176 flipParams->hdrInfoFrame.eotf =
3177 headModeSetConfig->hdrInfoFrame.eotf;
3178 flipParams->hdrInfoFrame.staticMetadata =
3179 headModeSetConfig->hdrInfoFrame.staticMetadata;
3180 }
3181 }
3182
3183 flipParams->colorimetry.specified =
3184 headRequestedConfig->flags.colorimetryChanged;
3185 if (flipParams->colorimetry.specified) {
3186 flipParams->colorimetry.val = headModeSetConfig->colorimetry;
3187 }
3188
3189 if (headModeSetConfig->vrrEnabled) {
3190 params->request.allowVrr = NV_TRUE;
3191 }
3192 if (headRequestedConfig->flags.lutChanged) {
3193 NvKmsKapiHeadLutConfigToKms(headModeSetConfig, &flipParams->lut);
3194 }
3195 }
3196
3197 if (params->request.numFlipHeads == 0) {
3198 goto done;
3199 }
3200
3201 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
3202 NVKMS_IOCTL_FLIP,
3203 params, sizeof(*params));
3204
3205 replyConfig->flipResult = params->reply.flipResult;
3206
3207 if (!status) {
3208 nvKmsKapiLogDeviceDebug(
3209 device,
3210 "NVKMS_IOCTL_FLIP ioctl failed");
3211 goto done;
3212 }
3213
3214 if (!commit) {
3215 goto done;
3216 }
3217
3218     /*! Fill the flip reply back into replyConfig. */
3219 for (i = 0; i < params->request.numFlipHeads; i++) {
3220 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig =
3221 &requestedConfig->headRequestedConfig[pFlipHead[i].head];
3222
3223 struct NvKmsKapiHeadReplyConfig *headReplyConfig =
3224 &replyConfig->headReplyConfig[pFlipHead[i].head];
3225
3226 const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig =
3227 &headRequestedConfig->modeSetConfig;
3228
3229         struct NvKmsFlipCommonReplyOneHead *flipParams = &params->reply.flipHead[i];
3230
3231 NvU32 layer;
3232
3233 if (!IsHeadConfigValid(params, requestedConfig, headModeSetConfig, pFlipHead[i].head)) {
3234 continue;
3235 }
3236
3237 for (layer = 0;
3238 layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig);
3239 layer++) {
3240
3241 const struct NvKmsKapiLayerConfig *layerRequestedConfig =
3242 &headRequestedConfig->layerRequestedConfig[layer].config;
3243
3244 struct NvKmsKapiLayerReplyConfig *layerReplyConfig =
3245 &headReplyConfig->layerReplyConfig[layer];
3246
3247             /*! Initialize explicitly to -1, since 0 is a valid file descriptor. */
3248 layerReplyConfig->postSyncptFd = -1;
3249 if (layerRequestedConfig->syncptParams.postSyncptRequested) {
3250 layerReplyConfig->postSyncptFd =
3251 flipParams->layer[layer].postSyncpt.u.fd;
3252 }
3253 }
3254 }
3255
3256 done:
3257
3258 nvKmsKapiFree(params);
3259
3260 return status;
3261 }
3262
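/*
 * Route a requested configuration to either KmsSetMode() or KmsFlip(). A full
 * modeset is required when a head's active state, display list, or mode has
 * changed, or, for validation-only (non-commit) requests, when an inactive
 * head still has displays attached; otherwise a flip is sufficient.
 */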
3263 static NvBool ApplyModeSetConfig(
3264 struct NvKmsKapiDevice *device,
3265 const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
3266 struct NvKmsKapiModeSetReplyConfig *replyConfig,
3267 const NvBool commit)
3268 {
3269 NvBool bRequiredModeset = NV_FALSE;
3270 NvU32 head;
3271
3272 if (device == NULL || requestedConfig == NULL) {
3273 return NV_FALSE;
3274 }
3275
3276 for (head = 0;
3277 head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) {
3278
3279 const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig =
3280 &requestedConfig->headRequestedConfig[head];
3281 const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig =
3282 &headRequestedConfig->modeSetConfig;
3283
3284 if ((requestedConfig->headsMask & (1 << head)) == 0x0) {
3285 continue;
3286 }
3287
3288 bRequiredModeset =
3289 headRequestedConfig->flags.activeChanged ||
3290 headRequestedConfig->flags.displaysChanged ||
3291 headRequestedConfig->flags.modeChanged;
3292
3293         /*
3294          * The NVKMS flip ioctl cannot validate a flip configuration for an
3295          * inactive head, therefore use the modeset ioctl if the configuration
3296          * contains any such head.
3297          */
3298 if (!commit &&
3299 headModeSetConfig->numDisplays != 0 && !headModeSetConfig->bActive) {
3300 bRequiredModeset = TRUE;
3301 }
3302
3303 if (bRequiredModeset) {
3304 break;
3305 }
3306 }
3307
3308 if (bRequiredModeset) {
3309 return KmsSetMode(device, requestedConfig, replyConfig, commit);
3310 }
3311
3312 return KmsFlip(device, requestedConfig, replyConfig, commit);
3313 }
3314
3315 /*
3316 * This executes without the nvkms_lock held. The lock will be grabbed
3317 * during the kapi dispatching contained in this function.
3318 */
3319 void nvKmsKapiHandleEventQueueChange
3320 (
3321 struct NvKmsKapiDevice *device
3322 )
3323 {
3324 if (device == NULL) {
3325 return;
3326 }
3327
3328     /*
3329      * If the callback were NULL, the event interest declaration should
3330      * have been rejected, and no events would be reported.
3331      */
3332 nvAssert(device->eventCallback != NULL);
3333
3334 do
3335 {
3336 struct NvKmsGetNextEventParams kmsEventParams = { };
3337 struct NvKmsKapiEvent kapiEvent = { };
3338 NvBool err = NV_FALSE;
3339
3340 if (!nvkms_ioctl_from_kapi(device->pKmsOpen,
3341 NVKMS_IOCTL_GET_NEXT_EVENT,
3342 &kmsEventParams, sizeof(kmsEventParams))) {
3343 break;
3344 }
3345
3346 if (!kmsEventParams.reply.valid) {
3347 break;
3348 }
3349
3350 kapiEvent.type = kmsEventParams.reply.event.eventType;
3351
3352 kapiEvent.device = device;
3353 kapiEvent.privateData = device->privateData;
3354
3355 switch (kmsEventParams.reply.event.eventType) {
3356 case NVKMS_EVENT_TYPE_DPY_CHANGED:
3357 kapiEvent.u.displayChanged.display =
3358 nvDpyIdToNvU32(kmsEventParams.
3359 reply.event.u.dpyChanged.dpyId);
3360 break;
3361 case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED:
3362 kapiEvent.u.dynamicDisplayConnected.display =
3363 nvDpyIdToNvU32(kmsEventParams.
3364 reply.event.u.dynamicDpyConnected.dpyId);
3365 break;
3366 case NVKMS_EVENT_TYPE_FLIP_OCCURRED:
3367 kapiEvent.u.flipOccurred.head =
3368 kmsEventParams.reply.event.u.flipOccurred.head;
3369 kapiEvent.u.flipOccurred.layer =
3370 kmsEventParams.reply.event.u.flipOccurred.layer;
3371 break;
3372 default:
3373 continue;
3374 }
3375
3376 if (err) {
3377 nvKmsKapiLogDeviceDebug(
3378 device,
3379 "Error in conversion from "
3380 "NvKmsGetNextEventParams to NvKmsKapiEvent");
3381 continue;
3382 }
3383
3384 device->eventCallback(&kapiEvent);
3385
3386 } while(1);
3387 }
3388
3389 /*
3390 * Helper function to convert NvKmsQueryDpyCRC32Reply to NvKmsKapiDpyCRC32.
3391 */
3392 static void NvKmsCrcsToKapi
3393 (
3394 const struct NvKmsQueryDpyCRC32Reply *crcs,
3395 struct NvKmsKapiCrcs *kmsCrcs
3396 )
3397 {
3398 kmsCrcs->outputCrc32.value = crcs->outputCrc32.value;
3399 kmsCrcs->outputCrc32.supported = crcs->outputCrc32.supported;
3400 kmsCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value;
3401 kmsCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported;
3402 kmsCrcs->compositorCrc32.value = crcs->compositorCrc32.value;
3403 kmsCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported;
3404 }
3405
3406 static NvBool GetCRC32
3407 (
3408 struct NvKmsKapiDevice *device,
3409 NvU32 head,
3410 struct NvKmsKapiCrcs *crc32
3411 )
3412 {
3413 struct NvKmsQueryDpyCRC32Params params = { };
3414 NvBool status;
3415
3416 if (device->hKmsDevice == 0x0) {
3417 return NV_TRUE;
3418 }
3419
3420 params.request.deviceHandle = device->hKmsDevice;
3421 params.request.dispHandle = device->hKmsDisp;
3422 params.request.head = head;
3423
3424 status = nvkms_ioctl_from_kapi(device->pKmsOpen,
3425 NVKMS_IOCTL_QUERY_DPY_CRC32,
3426                                    &params, sizeof(params));
3427
3428 if (!status) {
3429 nvKmsKapiLogDeviceDebug(device, "NVKMS QueryDpyCRC32Data failed.");
3430 return NV_FALSE;
3431 }
3432 NvKmsCrcsToKapi(¶ms.reply, crc32);
3433 return NV_TRUE;
3434 }
3435
3436 static NvKmsKapiSuspendResumeCallbackFunc *pSuspendResumeFunc;
3437
3438 void nvKmsKapiSuspendResume
3439 (
3440 NvBool suspend
3441 )
3442 {
3443 if (pSuspendResumeFunc) {
3444 pSuspendResumeFunc(suspend);
3445 }
3446 }
3447
3448 static void nvKmsKapiSetSuspendResumeCallback
3449 (
3450 NvKmsKapiSuspendResumeCallbackFunc *function
3451 )
3452 {
3453 if (pSuspendResumeFunc && function) {
3454 nvKmsKapiLogDebug("Kapi suspend/resume callback function already registered");
3455 }
3456
3457 pSuspendResumeFunc = function;
3458 }
3459
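/*
 * Populate the KAPI function table for a client such as nvidia-drm. The
 * caller passes its NV_VERSION_STRING in funcsTable->versionString; on a
 * mismatch the table is left unpopulated and versionString is overwritten
 * with the version NVKMS was built against, so the caller can see the
 * expected version.
 */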
3460 NvBool nvKmsKapiGetFunctionsTableInternal
3461 (
3462 struct NvKmsKapiFunctionsTable *funcsTable
3463 )
3464 {
3465 if (funcsTable == NULL) {
3466 return NV_FALSE;
3467 }
3468
3469 if (nvkms_strcmp(funcsTable->versionString, NV_VERSION_STRING) != 0) {
3470 funcsTable->versionString = NV_VERSION_STRING;
3471 return NV_FALSE;
3472 }
3473
3474 funcsTable->systemInfo.bAllowWriteCombining =
3475 nvkms_allow_write_combining();
3476
3477 funcsTable->enumerateGpus = EnumerateGpus;
3478
3479 funcsTable->allocateDevice = AllocateDevice;
3480 funcsTable->freeDevice = FreeDevice;
3481
3482 funcsTable->grabOwnership = GrabOwnership;
3483 funcsTable->releaseOwnership = ReleaseOwnership;
3484
3485 funcsTable->grantPermissions = GrantPermissions;
3486 funcsTable->revokePermissions = RevokePermissions;
3487 funcsTable->grantSubOwnership = GrantSubOwnership;
3488 funcsTable->revokeSubOwnership = RevokeSubOwnership;
3489
3490 funcsTable->declareEventInterest = DeclareEventInterest;
3491
3492 funcsTable->getDeviceResourcesInfo = GetDeviceResourcesInfo;
3493 funcsTable->getDisplays = GetDisplays;
3494 funcsTable->getConnectorInfo = GetConnectorInfo;
3495
3496 funcsTable->getStaticDisplayInfo = GetStaticDisplayInfo;
3497 funcsTable->getDynamicDisplayInfo = GetDynamicDisplayInfo;
3498
3499 funcsTable->allocateVideoMemory = AllocateVideoMemory;
3500 funcsTable->allocateSystemMemory = AllocateSystemMemory;
3501 funcsTable->importMemory = ImportMemory;
3502 funcsTable->dupMemory = DupMemory;
3503 funcsTable->exportMemory = ExportMemory;
3504 funcsTable->freeMemory = FreeMemory;
3505 funcsTable->getSystemMemoryHandleFromSgt = GetSystemMemoryHandleFromSgt;
3506 funcsTable->getSystemMemoryHandleFromDmaBuf =
3507 GetSystemMemoryHandleFromDmaBuf;
3508
3509 funcsTable->mapMemory = MapMemory;
3510 funcsTable->unmapMemory = UnmapMemory;
3511
3512 funcsTable->createSurface = CreateSurface;
3513 funcsTable->destroySurface = DestroySurface;
3514
3515 funcsTable->getDisplayMode = GetDisplayMode;
3516 funcsTable->validateDisplayMode = ValidateDisplayMode;
3517
3518 funcsTable->applyModeSetConfig = ApplyModeSetConfig;
3519
3520 funcsTable->allocateChannelEvent = nvKmsKapiAllocateChannelEvent;
3521 funcsTable->freeChannelEvent = nvKmsKapiFreeChannelEvent;
3522
3523 funcsTable->getCRC32 = GetCRC32;
3524
3525 funcsTable->getMemoryPages = GetMemoryPages;
3526 funcsTable->freeMemoryPages = FreeMemoryPages;
3527
3528 funcsTable->isMemoryValidForDisplay = IsMemoryValidForDisplay;
3529
3530 funcsTable->importSemaphoreSurface = nvKmsKapiImportSemaphoreSurface;
3531 funcsTable->freeSemaphoreSurface = nvKmsKapiFreeSemaphoreSurface;
3532 funcsTable->registerSemaphoreSurfaceCallback =
3533 nvKmsKapiRegisterSemaphoreSurfaceCallback;
3534 funcsTable->unregisterSemaphoreSurfaceCallback =
3535 nvKmsKapiUnregisterSemaphoreSurfaceCallback;
3536 funcsTable->setSemaphoreSurfaceValue =
3537 nvKmsKapiSetSemaphoreSurfaceValue;
3538 funcsTable->setSuspendResumeCallback = nvKmsKapiSetSuspendResumeCallback;
3539
3540 return NV_TRUE;
3541 }
3542