1 /*
2 * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 * SPDX-License-Identifier: MIT
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "nvkms.h"
25 #include "nvkms-private.h"
26 #include "nvkms-api.h"
27
28 #include "nvkms-types.h"
29 #include "nvkms-utils.h"
30 #include "nvkms-console-restore.h"
31 #include "nvkms-dpy.h"
32 #include "nvkms-dma.h"
33 #include "nvkms-evo.h"
34 #include "nvkms-rm.h"
35 #include "nvkms-rmapi.h"
36 #include "nvkms-modepool.h"
37 #include "nvkms-modeset.h"
38 #include "nvkms-attributes.h"
39 #include "nvkms-dpy-override.h"
40 #include "nvkms-framelock.h"
41 #include "nvkms-surface.h"
42 #include "nvkms-3dvision.h"
43 #include "nvkms-ioctl.h"
44 #include "nvkms-headsurface.h"
45 #include "nvkms-headsurface-ioctl.h"
46 #include "nvkms-headsurface-swapgroup.h"
47 #include "nvkms-flip.h" /* nvFlipEvo */
48 #include "nvkms-vrr.h"
49
50 #include "dp/nvdp-connector.h"
51
52 #include "nvUnixVersion.h" /* NV_VERSION_STRING */
53 #include <class/cl0000.h> /* NV01_NULL_OBJECT/NV01_ROOT */
54
55 #include "nv_list.h"
56
57
58 /*! \file
59 *
60 * This source file implements the API of NVKMS, built around open,
61 * close, and ioctl file operations.
62 *
63 * An NvKmsPerOpen is stored "per-open"; all API handles are specific
64 * to a per-open instance. The NvKmsPerOpen is allocated during each
65 * nvKmsOpen() call, and freed during the corresponding nvKmsClose()
66 * call.
67 *
68 * An NvKmsPerOpenDev stores the API handles for the device and all
69 * the disps and connectors on the device. It is allocated during
70 * nvKmsIoctl(ALLOC_DEVICE), and freed during nvKmsIoctl(FREE_DEVICE).
71 */
72
73
74 /*
75 * When the NVKMS device file is opened, the per-open structure could
76 * be used for one of several actions, denoted by its "type". The
77 * per-open type starts as Undefined. The per-open's first use
78 * defines its type. Once the type transitions from Undefined to
79 * anything, it can never transition to any other type.
80 */
81 enum NvKmsPerOpenType {
82 /*
83 * The per-open is used for making ioctl calls to make requests of
84 * NVKMS.
85 */
86 NvKmsPerOpenTypeIoctl,
87
88 /*
89 * The per-open is used for granting access to a NVKMS registered
90 * surface.
91 */
92 NvKmsPerOpenTypeGrantSurface,
93
94 /*
95 * The per-open is used for granting permissions.
96 */
97 NvKmsPerOpenTypeGrantPermissions,
98
99 /*
100 * The per-open is used for granting access to a swap group
101 */
102 NvKmsPerOpenTypeGrantSwapGroup,
103
104 /*
105 * The per-open is used to unicast a specific event.
106 */
107 NvKmsPerOpenTypeUnicastEvent,
108
109 /*
110 * The per-open is currently undefined (this is the initial
111 * state).
112 */
113 NvKmsPerOpenTypeUndefined,
114 };
115
/*
 * The specific kind of event a unicast-event per-open
 * (NvKmsPerOpenTypeUnicastEvent) delivers; selects the active member
 * of NvKmsPerOpen::unicastEvent::e.
 */
enum NvKmsUnicastEventType {
    /* Used by:
     *  NVKMS_IOCTL_JOIN_SWAP_GROUP */
    NvKmsUnicastEventTypeDeferredRequest,

    /* Used by:
     *  NVKMS_IOCTL_NOTIFY_VBLANK */
    NvKmsUnicastEventTypeVblankNotification,

    /* Undefined, this indicates the unicast fd is available for use. */
    NvKmsUnicastEventTypeUndefined,
};
128
/*
 * Per-client mapping between one connector on a disp and the API
 * handle this client uses to refer to it.
 */
struct NvKmsPerOpenConnector {
    NVConnectorEvoPtr pConnectorEvo;     /* the underlying connector */
    NvKmsConnectorHandle nvKmsApiHandle; /* client-visible handle */
};
133
/*
 * Per-client wrapper around a framelock object.  Multiple disps can
 * share one framelock object, so this is reference counted: each
 * NvKmsPerOpenDisp assigned to the framelock object holds one
 * reference (see AllocPerOpenFrameLock()/FreePerOpenFrameLock()).
 */
struct NvKmsPerOpenFrameLock {
    NVFrameLockEvoPtr pFrameLockEvo;     /* the underlying framelock object */
    int refCnt;                          /* number of NvKmsPerOpenDisps referencing this */
    NvKmsFrameLockHandle nvKmsApiHandle; /* client-visible handle */
};
139
/*
 * Per-client state for one disp of a device: the client's handle for
 * the disp, plus handle spaces for the objects scoped to the disp.
 */
struct NvKmsPerOpenDisp {
    NVDispEvoPtr pDispEvo;                /* the underlying disp */
    NvKmsDispHandle nvKmsApiHandle;       /* client-visible handle for the disp */
    /* Handle (in NvKmsPerOpen::ioctl.frameLockHandles) of the shared
     * NvKmsPerOpenFrameLock for this disp, or 0 if none. */
    NvKmsFrameLockHandle frameLockHandle;
    NVEvoApiHandlesRec connectorHandles;  /* handle space for connector[] below */
    struct NvKmsPerOpenConnector connector[NVKMS_MAX_CONNECTORS_PER_DISP];
    /* Per-head handle spaces for vblank sync objects and vblank callbacks. */
    NVEvoApiHandlesRec vblankSyncObjectHandles[NVKMS_MAX_HEADS_PER_DISP];
    NVEvoApiHandlesRec vblankCallbackHandles[NVKMS_MAX_HEADS_PER_DISP];
    NVEvoApiHandlesRec vblankSemControlHandles;
};
150
/*
 * Per-client state for one device: the client's handle for the
 * device, the handle spaces for objects scoped to the device, and the
 * flip/modeset permissions this client currently holds.  Allocated
 * during nvKmsIoctl(ALLOC_DEVICE), freed during
 * nvKmsIoctl(FREE_DEVICE) (see the \file comment above).
 */
struct NvKmsPerOpenDev {
    NVDevEvoPtr pDevEvo;                  /* the underlying device */
    NvKmsDeviceHandle nvKmsApiHandle;     /* client-visible handle for the device */
    NVEvoApiHandlesRec dispHandles;       /* handle space for disp[] below */
    NVEvoApiHandlesRec surfaceHandles;    /* handle space for registered surfaces */
    struct NvKmsFlipPermissions flipPermissions;
    struct NvKmsModesetPermissions modesetPermissions;
    struct NvKmsPerOpenDisp disp[NVKMS_MAX_SUBDEVICES];
    NvBool isPrivileged;                  /* exempt from permission revocation */
    NVEvoApiHandlesRec deferredRequestFifoHandles;
    NVEvoApiHandlesRec swapGroupHandles;  /* swap groups tracked by this client */
};
163
/* One queued event on NvKmsPerOpen::ioctl.eventList. */
struct NvKmsPerOpenEventListEntry {
    NVListRec eventListEntry; /* links into NvKmsPerOpen::ioctl.eventList */
    struct NvKmsEvent event;  /* the event payload */
};
168
/*
 * State tracked for each open of the NVKMS device file.  Allocated in
 * nvKmsOpen() and freed in nvKmsClose() (see the \file comment
 * above).  The union member in use is selected by 'type'; once the
 * type leaves NvKmsPerOpenTypeUndefined it never changes again.
 */
struct NvKmsPerOpen {
    nvkms_per_open_handle_t *pOpenKernel; /* opaque kernel-side handle */
    NvU32 pid;                            /* pid of the opening process */
    enum NvKmsClientType clientType;      /* kernel- or user-space client */
    NVListRec perOpenListEntry;           /* links into perOpenList */
    NVListRec perOpenIoctlListEntry;      /* links into perOpenIoctlList */
    enum NvKmsPerOpenType type;           /* selects the union member below */

    union {
        /* type == NvKmsPerOpenTypeIoctl */
        struct {
            NVListRec eventList;          /* queued NvKmsPerOpenEventListEntrys */
            NvU32 eventInterestMask;      /* which event types this client wants */
            NVEvoApiHandlesRec devHandles;       /* NvKmsPerOpenDev handle space */
            NVEvoApiHandlesRec frameLockHandles; /* NvKmsPerOpenFrameLock handle space */
        } ioctl;

        /* type == NvKmsPerOpenTypeGrantSurface */
        struct {
            NVSurfaceEvoPtr pSurfaceEvo;  /* the surface being granted */
        } grantSurface;

        /* type == NvKmsPerOpenTypeGrantSwapGroup */
        struct {
            NVDevEvoPtr pDevEvo;
            NVSwapGroupPtr pSwapGroup;    /* the swap group being granted */
        } grantSwapGroup;

        /* type == NvKmsPerOpenTypeGrantPermissions */
        struct {
            NVDevEvoPtr pDevEvo;
            struct NvKmsPermissions permissions; /* the permissions being granted */
        } grantPermissions;

        /* type == NvKmsPerOpenTypeUnicastEvent */
        struct {
            /*
             * A unicast event NvKmsPerOpen is assigned to an object, so that
             * that object can generate events on the unicast event.  Store a
             * pointer to that object, so that we can clear the pointer when the
             * unicast event NvKmsPerOpen is closed.
             */
            enum NvKmsUnicastEventType type; /* selects the member of 'e' below */
            union {
                struct {
                    NVDeferredRequestFifoPtr pDeferredRequestFifo;
                } deferred;

                struct {
                    NvKmsGenericHandle       hCallback;
                    struct NvKmsPerOpenDisp *pOpenDisp;
                    NvU32                    apiHead;
                } vblankNotification;
            } e;
        } unicastEvent;
    };
};
221
222 static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo);
223 static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo);
224
225 static void EnableAndSetupVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo);
226 static void DisableAndCleanVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo);
227
228 static NVListRec perOpenList = NV_LIST_INIT(&perOpenList);
229 static NVListRec perOpenIoctlList = NV_LIST_INIT(&perOpenIoctlList);
230
231 /*!
232 * Check if there is an NvKmsPerOpenDev on this NvKmsPerOpen that has
233 * the specified deviceId.
234 */
DeviceIdAlreadyPresent(struct NvKmsPerOpen * pOpen,NvU32 deviceId)235 static NvBool DeviceIdAlreadyPresent(struct NvKmsPerOpen *pOpen, NvU32 deviceId)
236 {
237 struct NvKmsPerOpenDev *pOpenDev;
238 NvKmsGenericHandle dev;
239
240 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
241
242 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
243 pOpenDev, dev) {
244 if (pOpenDev->pDevEvo->usesTegraDevice &&
245 (deviceId == NVKMS_DEVICE_ID_TEGRA)) {
246 return TRUE;
247 } else if (pOpenDev->pDevEvo->deviceId == deviceId) {
248 return TRUE;
249 }
250 }
251
252 return FALSE;
253 }
254
255
256 /*!
257 * Get the NvKmsPerOpenDev described by NvKmsPerOpen + deviceHandle.
258 */
GetPerOpenDev(const struct NvKmsPerOpen * pOpen,const NvKmsDeviceHandle deviceHandle)259 static struct NvKmsPerOpenDev *GetPerOpenDev(
260 const struct NvKmsPerOpen *pOpen,
261 const NvKmsDeviceHandle deviceHandle)
262 {
263 if (pOpen == NULL) {
264 return NULL;
265 }
266
267 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
268
269 return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.devHandles, deviceHandle);
270 }
271
272
273 /*!
274 * Get the NvKmsPerOpenDev and NvKmsPerOpenDisp described by
275 * NvKmsPerOpen + deviceHandle + dispHandle.
276 */
GetPerOpenDevAndDisp(const struct NvKmsPerOpen * pOpen,const NvKmsDeviceHandle deviceHandle,const NvKmsDispHandle dispHandle,struct NvKmsPerOpenDev ** ppOpenDev,struct NvKmsPerOpenDisp ** ppOpenDisp)277 static NvBool GetPerOpenDevAndDisp(
278 const struct NvKmsPerOpen *pOpen,
279 const NvKmsDeviceHandle deviceHandle,
280 const NvKmsDispHandle dispHandle,
281 struct NvKmsPerOpenDev **ppOpenDev,
282 struct NvKmsPerOpenDisp **ppOpenDisp)
283 {
284 struct NvKmsPerOpenDev *pOpenDev;
285 struct NvKmsPerOpenDisp *pOpenDisp;
286
287 pOpenDev = GetPerOpenDev(pOpen, deviceHandle);
288
289 if (pOpenDev == NULL) {
290 return FALSE;
291 }
292
293 pOpenDisp = nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles,
294 dispHandle);
295
296 if (pOpenDisp == NULL) {
297 return FALSE;
298 }
299
300 *ppOpenDev = pOpenDev;
301 *ppOpenDisp = pOpenDisp;
302
303 return TRUE;
304 }
305
306
307 /*!
308 * Get the NvKmsPerOpenDisp described by NvKmsPerOpen + deviceHandle +
309 * dispHandle.
310 */
GetPerOpenDisp(const struct NvKmsPerOpen * pOpen,const NvKmsDeviceHandle deviceHandle,const NvKmsDispHandle dispHandle)311 static struct NvKmsPerOpenDisp *GetPerOpenDisp(
312 const struct NvKmsPerOpen *pOpen,
313 const NvKmsDeviceHandle deviceHandle,
314 const NvKmsDispHandle dispHandle)
315 {
316 struct NvKmsPerOpenDev *pOpenDev;
317
318 pOpenDev = GetPerOpenDev(pOpen, deviceHandle);
319
320 if (pOpenDev == NULL) {
321 return NULL;
322 }
323
324 return nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, dispHandle);
325 }
326
327
328 /*!
329 * Get the NvKmsPerOpenConnector described by NvKmsPerOpen +
330 * deviceHandle + dispHandle + connectorHandle.
331 */
GetPerOpenConnector(const struct NvKmsPerOpen * pOpen,const NvKmsDeviceHandle deviceHandle,const NvKmsDispHandle dispHandle,const NvKmsConnectorHandle connectorHandle)332 static struct NvKmsPerOpenConnector *GetPerOpenConnector(
333 const struct NvKmsPerOpen *pOpen,
334 const NvKmsDeviceHandle deviceHandle,
335 const NvKmsDispHandle dispHandle,
336 const NvKmsConnectorHandle connectorHandle)
337 {
338 struct NvKmsPerOpenDisp *pOpenDisp;
339
340 pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle);
341
342 if (pOpenDisp == NULL) {
343 return NULL;
344 }
345
346 return nvEvoGetPointerFromApiHandle(&pOpenDisp->connectorHandles,
347 connectorHandle);
348 }
349
350
351 /*!
352 * Get the NVDpyEvoRec described by NvKmsPerOpen + deviceHandle +
353 * dispHandle + dpyId.
354 */
GetPerOpenDpy(const struct NvKmsPerOpen * pOpen,const NvKmsDeviceHandle deviceHandle,const NvKmsDispHandle dispHandle,const NVDpyId dpyId)355 static NVDpyEvoRec *GetPerOpenDpy(
356 const struct NvKmsPerOpen *pOpen,
357 const NvKmsDeviceHandle deviceHandle,
358 const NvKmsDispHandle dispHandle,
359 const NVDpyId dpyId)
360 {
361 struct NvKmsPerOpenDisp *pOpenDisp;
362
363 pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle);
364
365 if (pOpenDisp == NULL) {
366 return NULL;
367 }
368
369 return nvGetDpyEvoFromDispEvo(pOpenDisp->pDispEvo, dpyId);
370 }
371
372
373 /*!
374 * Get the NvKmsPerOpenFrameLock described by pOpen + frameLockHandle.
375 */
GetPerOpenFrameLock(const struct NvKmsPerOpen * pOpen,NvKmsFrameLockHandle frameLockHandle)376 static struct NvKmsPerOpenFrameLock *GetPerOpenFrameLock(
377 const struct NvKmsPerOpen *pOpen,
378 NvKmsFrameLockHandle frameLockHandle)
379 {
380 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
381
382 return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles,
383 frameLockHandle);
384 }
385
386
387 /*!
388 * Free the NvKmsPerOpenFrameLock associated with this NvKmsPerOpenDisp.
389 *
390 * Multiple disps can be assigned to the same framelock object, so
391 * NvKmsPerOpenFrameLock is reference counted: the object is freed
392 * once all NvKmsPerOpenDisps remove their reference to it.
393 *
394 * \param[in,out] pOpen The per-open data, to which the
395 * NvKmsPerOpenFrameLock is assigned.
396 * \param[in,out] pOpenDisp The NvKmsPerOpenDisp whose corresponding
397 * NvKmsPerOpenFrameLock should be freed.
398 */
FreePerOpenFrameLock(struct NvKmsPerOpen * pOpen,struct NvKmsPerOpenDisp * pOpenDisp)399 static void FreePerOpenFrameLock(struct NvKmsPerOpen *pOpen,
400 struct NvKmsPerOpenDisp *pOpenDisp)
401 {
402 struct NvKmsPerOpenFrameLock *pOpenFrameLock;
403
404 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
405
406 pOpenFrameLock =
407 nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles,
408 pOpenDisp->frameLockHandle);
409 if (pOpenFrameLock == NULL) {
410 return;
411 }
412
413 pOpenDisp->frameLockHandle = 0;
414
415 pOpenFrameLock->refCnt--;
416
417 if (pOpenFrameLock->refCnt != 0) {
418 return;
419 }
420
421 nvEvoDestroyApiHandle(&pOpen->ioctl.frameLockHandles,
422 pOpenFrameLock->nvKmsApiHandle);
423 nvFree(pOpenFrameLock);
424 }
425
426
427 /*!
428 * Allocate and initialize an NvKmsPerOpenFrameLock.
429 *
430 * If the disp described by the specified NvKmsPerOpenDisp has a
431 * framelock object, allocate an NvKmsPerOpenFrameLock for it.
432 *
433 * Multiple disps can be assigned to the same framelock object, so
434 * NvKmsPerOpenFrameLock is reference counted: we first look to see if
435 * an NvKmsPerOpenFrameLock for this disp's framelock object already
436 * exists. If so, we increment its reference count. Otherwise, we
437 * allocate a new NvKmsPerOpenFrameLock.
438 *
439 * \param[in,out] pOpen The per-open data, to which the
440 * new NvKmsPerOpenFrameLock should be assigned.
441 * \param[in,out] pOpenDisp The NvKmsPerOpenDisp whose corresponding
442 * NvKmsPerOpenFrameLock should be allocated.
443 */
AllocPerOpenFrameLock(struct NvKmsPerOpen * pOpen,struct NvKmsPerOpenDisp * pOpenDisp)444 static NvBool AllocPerOpenFrameLock(
445 struct NvKmsPerOpen *pOpen,
446 struct NvKmsPerOpenDisp *pOpenDisp)
447 {
448 struct NvKmsPerOpenFrameLock *pOpenFrameLock;
449 NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo;
450 NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
451 NvKmsGenericHandle handle;
452
453 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
454
455 if (pFrameLockEvo == NULL) {
456 return TRUE;
457 }
458
459 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles,
460 pOpenFrameLock, handle) {
461 if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) {
462 goto done;
463 }
464 }
465
466 pOpenFrameLock = nvCalloc(1, sizeof(*pOpenFrameLock));
467
468 if (pOpenFrameLock == NULL) {
469 return FALSE;
470 }
471
472 pOpenFrameLock->pFrameLockEvo = pFrameLockEvo;
473 pOpenFrameLock->nvKmsApiHandle =
474 nvEvoCreateApiHandle(&pOpen->ioctl.frameLockHandles, pOpenFrameLock);
475
476 if (pOpenFrameLock->nvKmsApiHandle == 0) {
477 nvFree(pOpenFrameLock);
478 return FALSE;
479 }
480
481 done:
482 pOpenDisp->frameLockHandle = pOpenFrameLock->nvKmsApiHandle;
483 pOpenFrameLock->refCnt++;
484 return TRUE;
485 }
486
487
488 /*!
489 * Get the NvKmsConnectorHandle that corresponds to the given
490 * NVConnectorEvoRec on the NvKmsPerOpen + deviceHandle + dispHandle.
491 */
ConnectorEvoToConnectorHandle(const struct NvKmsPerOpen * pOpen,const NvKmsDeviceHandle deviceHandle,const NvKmsDispHandle dispHandle,const NVConnectorEvoRec * pConnectorEvo)492 static NvKmsConnectorHandle ConnectorEvoToConnectorHandle(
493 const struct NvKmsPerOpen *pOpen,
494 const NvKmsDeviceHandle deviceHandle,
495 const NvKmsDispHandle dispHandle,
496 const NVConnectorEvoRec *pConnectorEvo)
497 {
498 struct NvKmsPerOpenDisp *pOpenDisp;
499 struct NvKmsPerOpenConnector *pOpenConnector;
500 NvKmsGenericHandle connector;
501
502 pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle);
503
504 if (pOpenDisp == NULL) {
505 return 0;
506 }
507
508 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles,
509 pOpenConnector, connector) {
510 if (pOpenConnector->pConnectorEvo == pConnectorEvo) {
511 return pOpenConnector->nvKmsApiHandle;
512 }
513 }
514
515 return 0;
516 }
517
518
519 /*!
520 * Get the NvKmsDeviceHandle and NvKmsDispHandle that corresponds to
521 * the given NVDispEvoRec on the NvKmsPerOpen.
522 */
DispEvoToDevAndDispHandles(const struct NvKmsPerOpen * pOpen,const NVDispEvoRec * pDispEvo,NvKmsDeviceHandle * pDeviceHandle,NvKmsDispHandle * pDispHandle)523 static NvBool DispEvoToDevAndDispHandles(
524 const struct NvKmsPerOpen *pOpen,
525 const NVDispEvoRec *pDispEvo,
526 NvKmsDeviceHandle *pDeviceHandle,
527 NvKmsDispHandle *pDispHandle)
528 {
529 NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
530 struct NvKmsPerOpenDev *pOpenDev;
531 NvKmsGenericHandle dev;
532
533 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
534
535 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
536 pOpenDev, dev) {
537
538 struct NvKmsPerOpenDisp *pOpenDisp;
539 NvKmsGenericHandle disp;
540
541 if (pOpenDev->pDevEvo != pDevEvo) {
542 continue;
543 }
544
545 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles,
546 pOpenDisp, disp) {
547 if (pOpenDisp->pDispEvo != pDispEvo) {
548 continue;
549 }
550
551 *pDeviceHandle = pOpenDev->nvKmsApiHandle;
552 *pDispHandle = pOpenDisp->nvKmsApiHandle;
553
554 return TRUE;
555 }
556 }
557
558 return FALSE;
559 }
560
561
562 /*!
563 * Get the NvKmsPerOpenDev that corresponds to the given NVDevEvoRec
564 * on the NvKmsPerOpen.
565 */
DevEvoToOpenDev(const struct NvKmsPerOpen * pOpen,const NVDevEvoRec * pDevEvo)566 static struct NvKmsPerOpenDev *DevEvoToOpenDev(
567 const struct NvKmsPerOpen *pOpen,
568 const NVDevEvoRec *pDevEvo)
569 {
570 struct NvKmsPerOpenDev *pOpenDev;
571 NvKmsGenericHandle dev;
572
573 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
574
575 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
576 pOpenDev, dev) {
577 if (pOpenDev->pDevEvo == pDevEvo) {
578 return pOpenDev;
579 }
580 }
581
582 return NULL;
583 }
584
585
586 /*!
587 * Get the NvKmsFrameLockHandle that corresponds to the given
588 * NVFrameLockEvoRec on the NvKmsPerOpen.
589 */
FrameLockEvoToFrameLockHandle(const struct NvKmsPerOpen * pOpen,const NVFrameLockEvoRec * pFrameLockEvo,NvKmsFrameLockHandle * pFrameLockHandle)590 static NvBool FrameLockEvoToFrameLockHandle(
591 const struct NvKmsPerOpen *pOpen,
592 const NVFrameLockEvoRec *pFrameLockEvo,
593 NvKmsFrameLockHandle *pFrameLockHandle)
594 {
595 struct NvKmsPerOpenFrameLock *pOpenFrameLock;
596 NvKmsGenericHandle handle;
597
598 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
599
600 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles,
601 pOpenFrameLock, handle) {
602
603 if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) {
604 *pFrameLockHandle = pOpenFrameLock->nvKmsApiHandle;
605 return TRUE;
606 }
607 }
608
609 return FALSE;
610 }
611
612
613 /*!
614 * Clear the specified NvKmsPerOpenConnector.
615 *
616 * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to which the
617 * NvKmsPerOpenConnector is assigned.
618 * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to be cleared.
619 */
ClearPerOpenConnector(struct NvKmsPerOpenDisp * pOpenDisp,struct NvKmsPerOpenConnector * pOpenConnector)620 static void ClearPerOpenConnector(
621 struct NvKmsPerOpenDisp *pOpenDisp,
622 struct NvKmsPerOpenConnector *pOpenConnector)
623 {
624 nvEvoDestroyApiHandle(&pOpenDisp->connectorHandles,
625 pOpenConnector->nvKmsApiHandle);
626 nvkms_memset(pOpenConnector, 0, sizeof(*pOpenConnector));
627 }
628
629
630 /*!
631 * Initialize an NvKmsPerOpenConnector.
632 *
633 * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to which the
634 * NvKmsPerOpenConnector is assigned.
635 * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to initialize.
636 * \param[in] pConnectorEvo The connector that the NvKmsPerOpenConnector
637 * corresponds to.
638 *
639 * \return If the NvKmsPerOpenConnector is successfully initialized,
640 * return TRUE. Otherwise, return FALSE.
641 */
InitPerOpenConnector(struct NvKmsPerOpenDisp * pOpenDisp,struct NvKmsPerOpenConnector * pOpenConnector,NVConnectorEvoPtr pConnectorEvo)642 static NvBool InitPerOpenConnector(
643 struct NvKmsPerOpenDisp *pOpenDisp,
644 struct NvKmsPerOpenConnector *pOpenConnector,
645 NVConnectorEvoPtr pConnectorEvo)
646 {
647 pOpenConnector->nvKmsApiHandle =
648 nvEvoCreateApiHandle(&pOpenDisp->connectorHandles, pOpenConnector);
649
650 if (pOpenConnector->nvKmsApiHandle == 0) {
651 goto fail;
652 }
653
654 pOpenConnector->pConnectorEvo = pConnectorEvo;
655
656 return TRUE;
657
658 fail:
659 ClearPerOpenConnector(pOpenDisp, pOpenConnector);
660 return FALSE;
661 }
662
663 /*!
664 * Clear the specified NvKmsPerOpenDisp.
665 *
666 * \param[in,out] pOpenDev The NvKmsPerOpenDev to which the NvKmsPerOpenDisp
667 * is assigned.
668 * \param[in,out] pDispEvo The NvKmsPerOpenDisp to be cleared.
669 */
ClearPerOpenDisp(struct NvKmsPerOpen * pOpen,struct NvKmsPerOpenDev * pOpenDev,struct NvKmsPerOpenDisp * pOpenDisp)670 static void ClearPerOpenDisp(
671 struct NvKmsPerOpen *pOpen,
672 struct NvKmsPerOpenDev *pOpenDev,
673 struct NvKmsPerOpenDisp *pOpenDisp)
674 {
675 struct NvKmsPerOpenConnector *pOpenConnector;
676 NvKmsGenericHandle connector;
677
678 NVVBlankCallbackPtr pCallbackData;
679 NvKmsGenericHandle callback;
680
681 FreePerOpenFrameLock(pOpen, pOpenDisp);
682
683 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles,
684 pOpenConnector, connector) {
685 ClearPerOpenConnector(pOpenDisp, pOpenConnector);
686 }
687
688 /* Destroy the API handle structures. */
689 nvEvoDestroyApiHandles(&pOpenDisp->connectorHandles);
690
691 for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
692 nvEvoDestroyApiHandles(&pOpenDisp->vblankSyncObjectHandles[i]);
693
694 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankCallbackHandles[i],
695 pCallbackData, callback) {
696 nvRemoveUnicastEvent(pCallbackData->pUserData);
697 }
698 nvEvoDestroyApiHandles(&pOpenDisp->vblankCallbackHandles[i]);
699 }
700
701 nvEvoDestroyApiHandles(&pOpenDisp->vblankSemControlHandles);
702
703 nvEvoDestroyApiHandle(&pOpenDev->dispHandles, pOpenDisp->nvKmsApiHandle);
704
705 nvkms_memset(pOpenDisp, 0, sizeof(*pOpenDisp));
706 }
707
708
709 /*!
710 * Initialize an NvKmsPerOpenDisp.
711 *
712 * \param[in,out] pOpenDev The NvKmsPerOpenDev to which the NvKmsPerOpenDisp
713 * is assigned.
714 * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to initialize.
715 * \param[in] pDispEvo The disp that the NvKmsPerOpenDisp corresponds to.
716 *
717 * \return If the NvKmsPerOpenDisp is successfully initialized, return TRUE.
718 * Otherwise, return FALSE.
719 */
InitPerOpenDisp(struct NvKmsPerOpen * pOpen,struct NvKmsPerOpenDev * pOpenDev,struct NvKmsPerOpenDisp * pOpenDisp,NVDispEvoPtr pDispEvo)720 static NvBool InitPerOpenDisp(
721 struct NvKmsPerOpen *pOpen,
722 struct NvKmsPerOpenDev *pOpenDev,
723 struct NvKmsPerOpenDisp *pOpenDisp,
724 NVDispEvoPtr pDispEvo)
725 {
726 NVConnectorEvoPtr pConnectorEvo;
727 NvU32 connector;
728
729 pOpenDisp->nvKmsApiHandle =
730 nvEvoCreateApiHandle(&pOpenDev->dispHandles, pOpenDisp);
731
732 if (pOpenDisp->nvKmsApiHandle == 0) {
733 goto fail;
734 }
735
736 pOpenDisp->pDispEvo = pDispEvo;
737
738 if (nvListCount(&pDispEvo->connectorList) >=
739 ARRAY_LEN(pOpenDisp->connector)) {
740 nvAssert(!"More connectors on this disp than NVKMS can handle.");
741 goto fail;
742 }
743
744 if (!nvEvoInitApiHandles(&pOpenDisp->connectorHandles,
745 ARRAY_LEN(pOpenDisp->connector))) {
746 goto fail;
747 }
748
749 connector = 0;
750 FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
751 if (!InitPerOpenConnector(pOpenDisp, &pOpenDisp->connector[connector],
752 pConnectorEvo)) {
753 goto fail;
754 }
755 connector++;
756 }
757
758 /* Initialize the vblankSyncObjectHandles for each head. */
759 for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
760 if (!nvEvoInitApiHandles(&pOpenDisp->vblankSyncObjectHandles[i],
761 NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) {
762 goto fail;
763 }
764 }
765
766 /* Initialize the vblankCallbackHandles for each head.
767 *
768 * The initial value of VBLANK_SYNC_OBJECTS_PER_HEAD doesn't really apply
769 * here, but we need something. */
770 for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
771 if (!nvEvoInitApiHandles(&pOpenDisp->vblankCallbackHandles[i],
772 NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) {
773 goto fail;
774 }
775 }
776
777 /* Initialize the vblankSemControlHandles.
778 *
779 * The initial value of VBLANK_SYNC_OBJECTS_PER_HEAD doesn't really apply
780 * here, but we need something. */
781 if (!nvEvoInitApiHandles(&pOpenDisp->vblankSemControlHandles,
782 NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) {
783 goto fail;
784 }
785
786 if (!AllocPerOpenFrameLock(pOpen, pOpenDisp)) {
787 goto fail;
788 }
789
790 return TRUE;
791
792 fail:
793 ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp);
794 return FALSE;
795 }
796
797 /*!
798 * Free any SwapGroups tracked by this pOpenDev.
799 */
FreeSwapGroups(struct NvKmsPerOpenDev * pOpenDev)800 static void FreeSwapGroups(struct NvKmsPerOpenDev *pOpenDev)
801 {
802 NVSwapGroupRec *pSwapGroup;
803 NvKmsSwapGroupHandle handle;
804 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;
805
806 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->swapGroupHandles,
807 pSwapGroup,
808 handle) {
809 nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle);
810
811 if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
812 nvHsFreeSwapGroup(pDevEvo, pSwapGroup);
813 } else {
814 nvHsDecrementSwapGroupRefCnt(pSwapGroup);
815 }
816 }
817 }
818
819 /*!
820 * Check that the NvKmsPermissions make sense.
821 */
ValidateNvKmsPermissions(const NVDevEvoRec * pDevEvo,const struct NvKmsPermissions * pPermissions,enum NvKmsClientType clientType)822 static NvBool ValidateNvKmsPermissions(
823 const NVDevEvoRec *pDevEvo,
824 const struct NvKmsPermissions *pPermissions,
825 enum NvKmsClientType clientType)
826 {
827 if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) {
828 NvU32 d, h;
829
830 for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) {
831 for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) {
832
833 NvU8 layerMask = pPermissions->flip.disp[d].head[h].layerMask;
834
835 if (layerMask == 0) {
836 continue;
837 }
838
839 if (nvHasBitAboveMax(layerMask, pDevEvo->apiHead[h].numLayers)) {
840 return FALSE;
841 }
842
843 /*
844 * If the above blocks didn't 'continue', then there
845 * are permissions specified for this disp+head. Is
846 * the specified disp+head in range for the current
847 * configuration?
848 */
849 if (d >= pDevEvo->nDispEvo) {
850 return FALSE;
851 }
852
853 if (h >= pDevEvo->numApiHeads) {
854 return FALSE;
855 }
856 }
857 }
858 } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET) {
859 NvU32 d, h;
860
861 for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) {
862 for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) {
863
864 NVDpyIdList dpyIdList =
865 pPermissions->modeset.disp[d].head[h].dpyIdList;
866
867 if (nvDpyIdListIsEmpty(dpyIdList)) {
868 continue;
869 }
870
871 /*
872 * If the above blocks didn't 'continue', then there
873 * are permissions specified for this disp+head. Is
874 * the specified disp+head in range for the current
875 * configuration?
876 */
877 if (d >= pDevEvo->nDispEvo) {
878 return FALSE;
879 }
880
881 if (h >= pDevEvo->numApiHeads) {
882 return FALSE;
883 }
884 }
885 }
886 } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_SUB_OWNER) {
887
888 /* Only kapi uses this permission type, so disallow it from userspace */
889 if (clientType != NVKMS_CLIENT_KERNEL_SPACE) {
890 return FALSE;
891 }
892
893 } else {
894 return FALSE;
895 }
896
897 return TRUE;
898 }
899
900 /*!
901 * Assign pPermissions with the maximum permissions possible for
902 * the pDevEvo.
903 */
AssignFullNvKmsFlipPermissions(const NVDevEvoRec * pDevEvo,struct NvKmsFlipPermissions * pPermissions)904 static void AssignFullNvKmsFlipPermissions(
905 const NVDevEvoRec *pDevEvo,
906 struct NvKmsFlipPermissions *pPermissions)
907 {
908 NvU32 dispIndex, apiHead;
909
910 nvkms_memset(pPermissions, 0, sizeof(*pPermissions));
911
912 for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) {
913 for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
914 pPermissions->disp[dispIndex].head[apiHead].layerMask =
915 NVBIT(pDevEvo->apiHead[apiHead].numLayers) - 1;
916 }
917 }
918 }
919
AssignFullNvKmsModesetPermissions(const NVDevEvoRec * pDevEvo,struct NvKmsModesetPermissions * pPermissions)920 static void AssignFullNvKmsModesetPermissions(
921 const NVDevEvoRec *pDevEvo,
922 struct NvKmsModesetPermissions *pPermissions)
923 {
924 NvU32 dispIndex, apiHead;
925
926 nvkms_memset(pPermissions, 0, sizeof(*pPermissions));
927
928 for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) {
929 for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
930 pPermissions->disp[dispIndex].head[apiHead].dpyIdList =
931 nvAllDpyIdList();
932 }
933 }
934 }
935
AssignFullNvKmsPermissions(struct NvKmsPerOpenDev * pOpenDev)936 static void AssignFullNvKmsPermissions(
937 struct NvKmsPerOpenDev *pOpenDev
938 )
939 {
940 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;
941
942 AssignFullNvKmsFlipPermissions(pDevEvo, &pOpenDev->flipPermissions);
943 AssignFullNvKmsModesetPermissions(pDevEvo, &pOpenDev->modesetPermissions);
944 }
945
946 /*!
947 * Set the modeset owner to pOpenDev
948 *
949 * \param pOpenDev The per-open device structure for the new modeset owner.
950 * \return FALSE if there was already a modeset owner. TRUE otherwise.
951 */
GrabModesetOwnership(struct NvKmsPerOpenDev * pOpenDev)952 static NvBool GrabModesetOwnership(struct NvKmsPerOpenDev *pOpenDev)
953 {
954 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;
955
956 if (pDevEvo->modesetOwner == pOpenDev) {
957 return TRUE;
958 }
959
960 if (pDevEvo->modesetOwner != NULL) {
961 return FALSE;
962 }
963
964 /*
965 * If claiming modeset ownership, undo any SST forcing imposed by
966 * console restore.
967 */
968 if (pOpenDev != pDevEvo->pNvKmsOpenDev) {
969 nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
970 }
971
972 pDevEvo->modesetOwner = pOpenDev;
973 pDevEvo->modesetOwnerChanged = TRUE;
974
975 AssignFullNvKmsPermissions(pOpenDev);
976 return TRUE;
977 }
978
979 /*
980 * If not NULL, remove pRemoveFlip from pFlip. Returns true if there are still
981 * some remaining permissions.
982 */
RemoveFlipPermissions(struct NvKmsFlipPermissions * pFlip,const struct NvKmsFlipPermissions * pRemoveFlip)983 static NvBool RemoveFlipPermissions(struct NvKmsFlipPermissions *pFlip,
984 const struct NvKmsFlipPermissions *pRemoveFlip)
985 {
986 NvU32 d, h, dLen, hLen;
987 NvBool remainingPermissions = FALSE;
988
989 dLen = ARRAY_LEN(pFlip->disp);
990 for (d = 0; d < dLen; d++) {
991 hLen = ARRAY_LEN(pFlip->disp[d].head);
992 for (h = 0; h < hLen; h++) {
993
994 if (pRemoveFlip) {
995 pFlip->disp[d].head[h].layerMask &=
996 ~pRemoveFlip->disp[d].head[h].layerMask;
997 }
998
999 remainingPermissions |= (pFlip->disp[d].head[h].layerMask != 0);
1000 }
1001 }
1002
1003 return remainingPermissions;
1004 }
1005
1006 /*
1007 * If not NULL, remove pRemoveModeset from pModeset. Returns true if there are
1008 * still some remaining permissions.
1009 */
RemoveModesetPermissions(struct NvKmsModesetPermissions * pModeset,const struct NvKmsModesetPermissions * pRemoveModeset)1010 static NvBool RemoveModesetPermissions(struct NvKmsModesetPermissions *pModeset,
1011 const struct NvKmsModesetPermissions *pRemoveModeset)
1012 {
1013 NvU32 d, h, dLen, hLen;
1014 NvBool remainingPermissions = FALSE;
1015
1016 dLen = ARRAY_LEN(pModeset->disp);
1017 for (d = 0; d < dLen; d++) {
1018 hLen = ARRAY_LEN(pModeset->disp[d].head);
1019 for (h = 0; h < hLen; h++) {
1020
1021 if (pRemoveModeset) {
1022 pModeset->disp[d].head[h].dpyIdList = nvDpyIdListMinusDpyIdList(
1023 pModeset->disp[d].head[h].dpyIdList,
1024 pRemoveModeset->disp[d].head[h].dpyIdList);
1025 }
1026
1027 remainingPermissions |=
1028 !nvDpyIdListIsEmpty(pModeset->disp[d].head[h].dpyIdList);
1029 }
1030 }
1031
1032 return remainingPermissions;
1033 }
1034
/*!
 * Clear permissions on the specified device for all NvKmsPerOpens.
 *
 * \param  typeBitmask      Bitmask of NVBIT(NV_KMS_PERMISSIONS_TYPE_*)
 *                          values selecting which permission types to revoke.
 * \param  pDevEvo          The device whose permissions are revoked.
 * \param  pOpenDevExclude  An Ioctl-type per-open device to leave untouched
 *                          (may be NULL).
 *
 * For NvKmsPerOpen::type==Ioctl, clear the permissions, except for the
 * specified pOpenDevExclude.
 *
 * For NvKmsPerOpen::type==GrantPermissions, clear
 * NvKmsPerOpen::grantPermissions and reset NvKmsPerOpen::type to
 * Undefined.
 */
static void RevokePermissionsInternal(
    const NvU32 typeBitmask,
    NVDevEvoRec *pDevEvo,
    const struct NvKmsPerOpenDev *pOpenDevExclude)
{
    struct NvKmsPerOpen *pOpen;

    nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) {

        /* Cancel pending grants of a revoked type for this device. */
        if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) &&
            (pOpen->grantPermissions.pDevEvo == pDevEvo) &&
            (typeBitmask & NVBIT(pOpen->grantPermissions.permissions.type))) {
            nvkms_memset(&pOpen->grantPermissions, 0,
                         sizeof(pOpen->grantPermissions));
            pOpen->type = NvKmsPerOpenTypeUndefined;
        }

        if (pOpen->type == NvKmsPerOpenTypeIoctl) {

            struct NvKmsPerOpenDev *pOpenDev =
                DevEvoToOpenDev(pOpen, pDevEvo);

            /* This pOpen holds no reference to pDevEvo. */
            if (pOpenDev == NULL) {
                continue;
            }

            /* Excluded and privileged opens keep their permissions. */
            if (pOpenDev == pOpenDevExclude || pOpenDev->isPrivileged) {
                continue;
            }

            /* Revoking sub-ownership also tears down its swapgroups. */
            if (pOpenDev == pDevEvo->modesetSubOwner &&
                (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER))) {
                FreeSwapGroups(pOpenDev);
                pDevEvo->modesetSubOwner = NULL;
            }

            /*
             * Clients with sub-owner permission (or better) don't get flipping
             * or modeset permission revoked.
             */
            if (nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
                continue;
            }

            if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING)) {
                nvkms_memset(&pOpenDev->flipPermissions, 0,
                             sizeof(pOpenDev->flipPermissions));
            }

            if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET)) {
                nvkms_memset(&pOpenDev->modesetPermissions, 0,
                             sizeof(pOpenDev->modesetPermissions));
            }
        }
    }
}
1101
/*
 * Restore the console on pDevEvo.  First try a modeset/flip to the
 * framebuffer console surface; if that fails, free the core channel so
 * that RM's console restore code takes over, then reallocate it so the
 * device is ready for the next NVKMS client.
 */
static void RestoreConsole(NVDevEvoPtr pDevEvo)
{
    /* Preferred path: modeset and flip to the framebuffer console surface. */
    if (nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */)) {
        return;
    }

    /* Fallback: freeing the core channel triggers RM's console restore. */
    FreeSurfaceCtxDmasForAllOpens(pDevEvo);
    DisableAndCleanVblankSyncObjectForAllOpens(pDevEvo);
    nvFreeCoreChannelEvo(pDevEvo);

    /*
     * Reallocate the core channel right away so that it is ready if
     * another NVKMS client starts.
     */
    if (nvAllocCoreChannelEvo(pDevEvo)) {
        nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
        EnableAndSetupVblankSyncObjectForAllOpens(pDevEvo);
        AllocSurfaceCtxDmasForAllOpens(pDevEvo);
    }
}
1122
1123 /*!
1124 * Release modeset ownership previously set by GrabModesetOwnership
1125 *
1126 * \param pOpenDev The per-open device structure relinquishing modeset
1127 * ownership.
1128 * \return FALSE if pOpenDev is not the modeset owner, TRUE otherwise.
1129 */
ReleaseModesetOwnership(struct NvKmsPerOpenDev * pOpenDev)1130 static NvBool ReleaseModesetOwnership(struct NvKmsPerOpenDev *pOpenDev)
1131 {
1132 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;
1133
1134 if (pDevEvo->modesetOwner != pOpenDev) {
1135 // Only the current owner can release ownership.
1136 return FALSE;
1137 }
1138
1139 FreeSwapGroups(pOpenDev);
1140
1141 pDevEvo->modesetOwner = NULL;
1142 pDevEvo->modesetOwnerChanged = TRUE;
1143 pDevEvo->handleConsoleHotplugs = TRUE;
1144
1145 RestoreConsole(pDevEvo);
1146 RevokePermissionsInternal(NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) |
1147 NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET) |
1148 NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER),
1149 pDevEvo, NULL /* pOpenDevExclude */);
1150 return TRUE;
1151 }
1152
/*!
 * Free the specified NvKmsPerOpenDev and all of its handle tables.
 *
 * \param[in,out]  pOpen     The per-open data, to which the
 *                           NvKmsPerOpenDev is assigned.
 * \param[in,out]  pOpenDev  The NvKmsPerOpenDev to free; may be NULL
 *                           (no-op), or partially initialized (as from a
 *                           failed nvAllocPerOpenDev()).
 */
void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen,
                      struct NvKmsPerOpenDev *pOpenDev)
{
    struct NvKmsPerOpenDisp *pOpenDisp;
    NvKmsGenericHandle disp;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    if (pOpenDev == NULL) {
        return;
    }

    nvEvoDestroyApiHandles(&pOpenDev->surfaceHandles);

    /* Tear down each per-disp state before destroying the disp handles. */
    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles,
                                        pOpenDisp, disp) {
        ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp);
    }

    nvEvoDestroyApiHandles(&pOpenDev->dispHandles);

    /* Remove this device's handle from the per-open device handle table. */
    nvEvoDestroyApiHandle(&pOpen->ioctl.devHandles, pOpenDev->nvKmsApiHandle);

    nvEvoDestroyApiHandles(&pOpenDev->deferredRequestFifoHandles);

    nvEvoDestroyApiHandles(&pOpenDev->swapGroupHandles);

    nvFree(pOpenDev);
}
1189
1190
/*!
 * Allocate and initialize an NvKmsPerOpenDev.
 *
 * \param[in,out]  pOpen         The per-open data, to which the
 *                               new NvKmsPerOpenDev should be assigned.
 * \param[in]      pDevEvo       The device to which the new NvKmsPerOpenDev
 *                               corresponds.
 * \param[in]      isPrivileged  The NvKmsPerOpenDev is privileged which can
 *                               do modeset anytime.
 *
 * \return  On success, return a pointer to the new NvKmsPerOpenDev.
 *          On failure, return NULL.
 */
struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen,
                                          NVDispEvoPtr pDevEvo, NvBool isPrivileged)
{
    struct NvKmsPerOpenDev *pOpenDev = nvCalloc(1, sizeof(*pOpenDev));
    NVDispEvoPtr pDispEvo;
    NvU32 disp;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    if (pOpenDev == NULL) {
        goto fail;
    }

    pOpenDev->nvKmsApiHandle =
        nvEvoCreateApiHandle(&pOpen->ioctl.devHandles, pOpenDev);

    /* A zero handle indicates handle creation failed. */
    if (pOpenDev->nvKmsApiHandle == 0) {
        goto fail;
    }

    pOpenDev->pDevEvo = pDevEvo;

    if (!nvEvoInitApiHandles(&pOpenDev->dispHandles,
                             ARRAY_LEN(pOpenDev->disp))) {
        goto fail;
    }

    if (pDevEvo->nDispEvo > ARRAY_LEN(pOpenDev->disp)) {
        nvAssert(!"More disps on this device than NVKMS can handle.");
        goto fail;
    }

    /* Initialize one NvKmsPerOpenDisp per disp on the device. */
    FOR_ALL_EVO_DISPLAYS(pDispEvo, disp, pDevEvo) {
        if (!InitPerOpenDisp(pOpen, pOpenDev, &pOpenDev->disp[disp], pDispEvo)) {
            goto fail;
        }
    }

    if (!nvEvoInitApiHandles(&pOpenDev->surfaceHandles, 32)) {
        goto fail;
    }

    pOpenDev->isPrivileged = isPrivileged;
    /* Privileged opens implicitly hold full permissions. */
    if (pOpenDev->isPrivileged) {
        AssignFullNvKmsPermissions(pOpenDev);
    }

    if (!nvEvoInitApiHandles(&pOpenDev->deferredRequestFifoHandles, 4)) {
        goto fail;
    }

    if (!nvEvoInitApiHandles(&pOpenDev->swapGroupHandles, 4)) {
        goto fail;
    }

    return pOpenDev;

fail:
    /* nvFreePerOpenDev() tolerates NULL and partially initialized input. */
    nvFreePerOpenDev(pOpen, pOpenDev);
    return NULL;
}
1265
1266
1267 /*!
1268 * Assign NvKmsPerOpen::type.
1269 *
1270 * This succeeds only if NvKmsPerOpen::type is Undefined, or already
1271 * has the requested type and allowRedundantAssignment is TRUE.
1272 */
AssignNvKmsPerOpenType(struct NvKmsPerOpen * pOpen,enum NvKmsPerOpenType type,NvBool allowRedundantAssignment)1273 static NvBool AssignNvKmsPerOpenType(struct NvKmsPerOpen *pOpen,
1274 enum NvKmsPerOpenType type,
1275 NvBool allowRedundantAssignment)
1276 {
1277 if ((pOpen->type == type) && allowRedundantAssignment) {
1278 return TRUE;
1279 }
1280
1281 if (pOpen->type != NvKmsPerOpenTypeUndefined) {
1282 return FALSE;
1283 }
1284
1285 switch (type) {
1286 case NvKmsPerOpenTypeIoctl:
1287 nvListInit(&pOpen->ioctl.eventList);
1288
1289 if (!nvEvoInitApiHandles(&pOpen->ioctl.devHandles, NV_MAX_DEVICES)) {
1290 return FALSE;
1291 }
1292
1293 if (!nvEvoInitApiHandles(&pOpen->ioctl.frameLockHandles, 4)) {
1294 nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles);
1295 return FALSE;
1296 }
1297
1298 nvListAppend(&pOpen->perOpenIoctlListEntry, &perOpenIoctlList);
1299 break;
1300
1301 case NvKmsPerOpenTypeGrantSurface:
1302 /* Nothing to do, here. */
1303 break;
1304
1305 case NvKmsPerOpenTypeGrantSwapGroup:
1306 /* Nothing to do, here. */
1307 break;
1308
1309 case NvKmsPerOpenTypeGrantPermissions:
1310 /* Nothing to do, here. */
1311 break;
1312
1313 case NvKmsPerOpenTypeUnicastEvent:
1314 /* Nothing to do, here. */
1315 break;
1316
1317 case NvKmsPerOpenTypeUndefined:
1318 nvAssert(!"unexpected NvKmsPerOpenType");
1319 break;
1320 }
1321
1322 pOpen->type = type;
1323 return TRUE;
1324 }
1325
1326 /*!
1327 * Return whether the PerOpen can be used as a unicast event.
1328 */
PerOpenIsValidForUnicastEvent(const struct NvKmsPerOpen * pOpen)1329 static inline NvBool PerOpenIsValidForUnicastEvent(
1330 const struct NvKmsPerOpen *pOpen)
1331 {
1332 /* If the type is Undefined, it can be made a unicast event. */
1333
1334 if (pOpen->type == NvKmsPerOpenTypeUndefined) {
1335 return TRUE;
1336 }
1337
1338 /*
1339 * If the type is already UnicastEvent but there is no active user, it can
1340 * be made a unicast event.
1341 */
1342 if ((pOpen->type == NvKmsPerOpenTypeUnicastEvent) &&
1343 (pOpen->unicastEvent.type == NvKmsUnicastEventTypeUndefined)) {
1344 return TRUE;
1345 }
1346
1347 return FALSE;
1348 }
1349
/*!
 * Allocate the specified device (NVKMS_IOCTL_ALLOC_DEVICE) and populate
 * the reply with the device's static capabilities and handles.
 *
 * If the device already exists, its reference count is bumped instead of
 * allocating a new NVDevEvoRec.  On failure, pParams->reply.status holds
 * the specific error.
 */
static NvBool AllocDevice(struct NvKmsPerOpen *pOpen,
                          void *pParamsVoid)
{
    struct NvKmsAllocDeviceParams *pParams = pParamsVoid;
    NVDevEvoPtr pDevEvo;
    struct NvKmsPerOpenDev *pOpenDev;
    NvU32 disp, apiHead;
    NvU8 layer;

    nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));

    /* Client and NVKMS must be built from the same version. */
    if (nvkms_strcmp(pParams->request.versionString, NV_VERSION_STRING) != 0) {
        pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH;
        return FALSE;
    }

    /*
     * It is an error to call NVKMS_IOCTL_ALLOC_DEVICE multiple times
     * on the same device with the same fd.
     */
    if (DeviceIdAlreadyPresent(pOpen, pParams->request.deviceId)) {
        pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST;
        return FALSE;
    }

    pDevEvo = nvFindDevEvoByDeviceId(pParams->request.deviceId);

    if (pDevEvo == NULL) {
        /* First client for this device: allocate it. */
        pDevEvo = nvAllocDevEvo(&pParams->request, &pParams->reply.status);
        if (pDevEvo == NULL) {
            return FALSE;
        }
    } else {
        /* Existing device: request must be compatible with its config. */
        if (!pParams->request.tryInferSliMosaicFromExistingDevice &&
            (pDevEvo->sli.mosaic != pParams->request.sliMosaic)) {
            pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST;
            return FALSE;
        }

        if (pDevEvo->usesTegraDevice &&
            (pParams->request.deviceId != NVKMS_DEVICE_ID_TEGRA)) {
            pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST;
            return FALSE;
        }
        pDevEvo->allocRefCnt++;
    }

    pOpenDev = nvAllocPerOpenDev(pOpen, pDevEvo, FALSE /* isPrivileged */);

    if (pOpenDev == NULL) {
        /* Drops the reference taken above (or frees a new device). */
        nvFreeDevEvo(pDevEvo);
        pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR;
        return FALSE;
    }

    /* Beyond this point, the function cannot fail. */

    if (pParams->request.enableConsoleHotplugHandling) {
        pDevEvo->handleConsoleHotplugs = TRUE;
    }

    pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle;
    pParams->reply.subDeviceMask =
        NV_TWO_N_MINUS_ONE(pDevEvo->numSubDevices);
    pParams->reply.numHeads = pDevEvo->numApiHeads;
    pParams->reply.numDisps = pDevEvo->nDispEvo;

    ct_assert(ARRAY_LEN(pParams->reply.dispHandles) ==
              ARRAY_LEN(pOpenDev->disp));

    for (disp = 0; disp < ARRAY_LEN(pParams->reply.dispHandles); disp++) {
        pParams->reply.dispHandles[disp] = pOpenDev->disp[disp].nvKmsApiHandle;
    }

    pParams->reply.inputLutAppliesToBase = pDevEvo->caps.inputLutAppliesToBase;

    ct_assert(ARRAY_LEN(pParams->reply.layerCaps) ==
              ARRAY_LEN(pDevEvo->caps.layerCaps));

    for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
        pParams->reply.numLayers[apiHead] = pDevEvo->apiHead[apiHead].numLayers;
    }

    for (layer = 0;
         layer < ARRAY_LEN(pParams->reply.layerCaps);
         layer++) {
        pParams->reply.layerCaps[layer] = pDevEvo->caps.layerCaps[layer];
    }

    pParams->reply.surfaceAlignment  = NV_EVO_SURFACE_ALIGNMENT;
    pParams->reply.requiresVrrSemaphores = !pDevEvo->hal->caps.supportsDisplayRate;

    pParams->reply.nIsoSurfacesInVidmemOnly =
        !!NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits,
            NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY);

    pParams->reply.requiresAllAllocationsInSysmem =
        pDevEvo->requiresAllAllocationsInSysmem;
    pParams->reply.supportsHeadSurface = pDevEvo->isHeadSurfaceSupported;

    pParams->reply.validNIsoFormatMask = pDevEvo->caps.validNIsoFormatMask;

    pParams->reply.maxWidthInBytes   = pDevEvo->caps.maxWidthInBytes;
    pParams->reply.maxWidthInPixels  = pDevEvo->caps.maxWidthInPixels;
    pParams->reply.maxHeightInPixels = pDevEvo->caps.maxHeight;
    pParams->reply.cursorCompositionCaps = pDevEvo->caps.cursorCompositionCaps;

    pParams->reply.maxCursorSize     = pDevEvo->cursorHal->caps.maxSize;

    /* NVKMS swap groups and warp&blend depends on headSurface functionality. */
    pParams->reply.supportsSwapGroups = pDevEvo->isHeadSurfaceSupported;
    pParams->reply.supportsWarpAndBlend = pDevEvo->isHeadSurfaceSupported;

    pParams->reply.validLayerRRTransforms = pDevEvo->caps.validLayerRRTransforms;

    pParams->reply.isoIOCoherencyModes = pDevEvo->isoIOCoherencyModes;
    pParams->reply.nisoIOCoherencyModes = pDevEvo->nisoIOCoherencyModes;

    /*
     * TODO: Replace the isSOCDisplay check with an RM query. See Bug 3689635.
     */
    pParams->reply.displayIsGpuL2Coherent = !pDevEvo->isSOCDisplay;

    pParams->reply.supportsSyncpts = pDevEvo->supportsSyncpts;

    pParams->reply.supportsIndependentAcqRelSemaphore =
        pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore;

    pParams->reply.supportsVblankSyncObjects =
        pDevEvo->hal->caps.supportsVblankSyncObjects;

    pParams->reply.supportsVblankSemControl = pDevEvo->supportsVblankSemControl;

    pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS;

    return TRUE;
}
1490
UnregisterDeferredRequestFifos(struct NvKmsPerOpenDev * pOpenDev)1491 static void UnregisterDeferredRequestFifos(struct NvKmsPerOpenDev *pOpenDev)
1492 {
1493 NVDeferredRequestFifoRec *pDeferredRequestFifo;
1494 NvKmsGenericHandle handle;
1495
1496 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->deferredRequestFifoHandles,
1497 pDeferredRequestFifo,
1498 handle) {
1499
1500 nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle);
1501
1502 nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo,
1503 pDeferredRequestFifo);
1504 }
1505 }
1506
1507 /*
1508 * Forward declaration since this function is used by
1509 * DisableRemainingVblankSyncObjects().
1510 */
1511 static void DisableAndCleanVblankSyncObject(NVDispEvoRec *pDispEvo,
1512 const NvU32 apiHead,
1513 NVVblankSyncObjectRec *pVblankSyncObject,
1514 NVEvoUpdateState *pUpdateState);
1515
/*
 * Disable and destroy every vblank sync object still owned by pOpenDev,
 * batching the hardware changes into one update per disp.
 */
static void DisableRemainingVblankSyncObjects(struct NvKmsPerOpen *pOpen,
                                              struct NvKmsPerOpenDev *pOpenDev)
{
    struct NvKmsPerOpenDisp *pOpenDisp;
    NvKmsGenericHandle disp;
    NVVblankSyncObjectRec *pVblankSyncObject;
    NvKmsVblankSyncObjectHandle handle;
    NvU32 apiHead = 0;

    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    if (pOpenDev == NULL) {
        return;
    }

    /* For each pOpenDisp: */
    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles,
                                        pOpenDisp, disp) {
        /*
         * A single update state can handle changes across multiple heads on a
         * given Disp.
         */
        NVEvoUpdateState updateState = { };

        /* For each head: */
        for (apiHead = 0; apiHead < ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) {
            NVEvoApiHandlesRec *pHandles =
                &pOpenDisp->vblankSyncObjectHandles[apiHead];

            /* For each still-active vblank sync object: */
            FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles,
                                                pVblankSyncObject, handle) {
                DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead,
                                                pVblankSyncObject,
                                                &updateState);
                /* Remove the handle from the map. */
                nvEvoDestroyApiHandle(pHandles, handle);
            }
        }

        if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) {
            /*
             * Instruct hardware to execute the staged commands from the
             * ConfigureVblankSyncObject() calls (inherent in
             * DisableAndCleanVblankSyncObject()) above. This will set up
             * and wait for a notification that the hardware execution
             * has completed.
             */
            nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState,
                                  TRUE);
        }
    }
}
1569
DisableRemainingVblankSemControls(struct NvKmsPerOpen * pOpen,struct NvKmsPerOpenDev * pOpenDev)1570 static void DisableRemainingVblankSemControls(
1571 struct NvKmsPerOpen *pOpen,
1572 struct NvKmsPerOpenDev *pOpenDev)
1573 {
1574 struct NvKmsPerOpenDisp *pOpenDisp;
1575 NvKmsGenericHandle dispHandle;
1576 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;
1577
1578 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
1579
1580 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles,
1581 pOpenDisp,
1582 dispHandle) {
1583
1584 NVVblankSemControl *pVblankSemControl;
1585 NvKmsGenericHandle vblankSemControlHandle;
1586
1587 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->vblankSemControlHandles,
1588 pVblankSemControl,
1589 vblankSemControlHandle) {
1590 NvBool ret =
1591 nvEvoDisableVblankSemControl(pDevEvo, pVblankSemControl);
1592
1593 if (!ret) {
1594 nvAssert(!"implicit disable of vblank sem control failed.");
1595 }
1596 nvEvoDestroyApiHandle(&pOpenDisp->vblankSemControlHandles,
1597 vblankSemControlHandle);
1598 }
1599 }
1600 }
1601
/*
 * Drop pOpenDev's reference on its device: release every per-open
 * resource (vblank sync objects, vblank sem controls, swapgroups,
 * deferred request FIFOs, surfaces), release modeset (sub-)ownership if
 * held, and free the NvKmsPerOpenDev itself.
 */
static void FreeDeviceReference(struct NvKmsPerOpen *pOpen,
                                struct NvKmsPerOpenDev *pOpenDev)
{
    /* Disable all client-owned vblank sync objects that still exist. */
    DisableRemainingVblankSyncObjects(pOpen, pOpenDev);

    DisableRemainingVblankSemControls(pOpen, pOpenDev);

    FreeSwapGroups(pOpenDev);

    UnregisterDeferredRequestFifos(pOpenDev);

    nvEvoFreeClientSurfaces(pOpenDev->pDevEvo, pOpenDev,
                            &pOpenDev->surfaceHandles);

    if (!nvFreeDevEvo(pOpenDev->pDevEvo)) {
        // If this pOpenDev is the modeset owner, implicitly release it. Does
        // nothing if this pOpenDev is not the modeset owner.
        //
        // If nvFreeDevEvo() freed the device, then it also implicitly released
        // ownership.
        ReleaseModesetOwnership(pOpenDev);

        nvAssert(pOpenDev->pDevEvo->modesetOwner != pOpenDev);

        // If this pOpenDev is the modeset sub-owner, implicitly release it.
        if (pOpenDev->pDevEvo->modesetSubOwner == pOpenDev) {
            pOpenDev->pDevEvo->modesetSubOwner = NULL;
        }
    }

    nvFreePerOpenDev(pOpen, pOpenDev);
}
1635
1636 /*!
1637 * Free the specified device.
1638 */
FreeDevice(struct NvKmsPerOpen * pOpen,void * pParamsVoid)1639 static NvBool FreeDevice(struct NvKmsPerOpen *pOpen,
1640 void *pParamsVoid)
1641 {
1642 struct NvKmsFreeDeviceParams *pParams = pParamsVoid;
1643 struct NvKmsPerOpenDev *pOpenDev;
1644
1645 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
1646
1647 if (pOpenDev == NULL) {
1648 return FALSE;
1649 }
1650
1651 FreeDeviceReference(pOpen, pOpenDev);
1652
1653 return TRUE;
1654 }
1655
1656
1657 /*!
1658 * Get the disp data. This information should remain static for the
1659 * lifetime of the disp.
1660 */
QueryDisp(struct NvKmsPerOpen * pOpen,void * pParamsVoid)1661 static NvBool QueryDisp(struct NvKmsPerOpen *pOpen,
1662 void *pParamsVoid)
1663 {
1664 struct NvKmsQueryDispParams *pParams = pParamsVoid;
1665 struct NvKmsPerOpenDisp *pOpenDisp;
1666 const NVEvoSubDeviceRec *pSubDevice;
1667 NVDispEvoPtr pDispEvo;
1668 NvU32 connector;
1669
1670 pOpenDisp = GetPerOpenDisp(pOpen,
1671 pParams->request.deviceHandle,
1672 pParams->request.dispHandle);
1673 if (pOpenDisp == NULL) {
1674 return FALSE;
1675 }
1676
1677 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
1678
1679 pDispEvo = pOpenDisp->pDispEvo;
1680
1681 // Don't include dynamic displays in validDpys. The data returned here is
1682 // supposed to be static for the lifetime of the pDispEvo.
1683 pParams->reply.validDpys =
1684 nvDpyIdListMinusDpyIdList(pDispEvo->validDisplays,
1685 pDispEvo->dynamicDpyIds);
1686 pParams->reply.bootDpys = pDispEvo->bootDisplays;
1687 pParams->reply.muxDpys = pDispEvo->muxDisplays;
1688 pParams->reply.frameLockHandle = pOpenDisp->frameLockHandle;
1689 pParams->reply.numConnectors = nvListCount(&pDispEvo->connectorList);
1690
1691 ct_assert(ARRAY_LEN(pParams->reply.connectorHandles) ==
1692 ARRAY_LEN(pOpenDisp->connector));
1693
1694 for (connector = 0; connector < ARRAY_LEN(pParams->reply.connectorHandles);
1695 connector++) {
1696 pParams->reply.connectorHandles[connector] =
1697 pOpenDisp->connector[connector].nvKmsApiHandle;
1698 }
1699
1700 pSubDevice = pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner];
1701 if (pSubDevice != NULL) {
1702 ct_assert(sizeof(pParams->reply.gpuString) >=
1703 sizeof(pSubDevice->gpuString));
1704 nvkms_memcpy(pParams->reply.gpuString, pSubDevice->gpuString,
1705 sizeof(pSubDevice->gpuString));
1706 }
1707
1708 return TRUE;
1709 }
1710
1711
1712 /*!
1713 * Get the connector static data. This information should remain static for the
1714 * lifetime of the connector.
1715 */
QueryConnectorStaticData(struct NvKmsPerOpen * pOpen,void * pParamsVoid)1716 static NvBool QueryConnectorStaticData(struct NvKmsPerOpen *pOpen,
1717 void *pParamsVoid)
1718 {
1719 struct NvKmsQueryConnectorStaticDataParams *pParams = pParamsVoid;
1720 struct NvKmsPerOpenConnector *pOpenConnector;
1721 NVConnectorEvoPtr pConnectorEvo;
1722
1723 pOpenConnector = GetPerOpenConnector(pOpen,
1724 pParams->request.deviceHandle,
1725 pParams->request.dispHandle,
1726 pParams->request.connectorHandle);
1727 if (pOpenConnector == NULL) {
1728 return FALSE;
1729 }
1730
1731 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
1732
1733 pConnectorEvo = pOpenConnector->pConnectorEvo;
1734
1735 pParams->reply.dpyId = pConnectorEvo->displayId;
1736 pParams->reply.isDP = nvConnectorUsesDPLib(pConnectorEvo) ||
1737 nvConnectorIsDPSerializer(pConnectorEvo);
1738 pParams->reply.legacyTypeIndex = pConnectorEvo->legacyTypeIndex;
1739 pParams->reply.type = pConnectorEvo->type;
1740 pParams->reply.typeIndex = pConnectorEvo->typeIndex;
1741 pParams->reply.signalFormat = pConnectorEvo->signalFormat;
1742 pParams->reply.physicalIndex = pConnectorEvo->physicalIndex;
1743 pParams->reply.physicalLocation = pConnectorEvo->physicalLocation;
1744
1745 pParams->reply.isLvds =
1746 (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
1747 (pConnectorEvo->or.protocol ==
1748 NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM);
1749
1750 pParams->reply.locationOnChip = (pConnectorEvo->or.location ==
1751 NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP);
1752 return TRUE;
1753 }
1754
1755
1756 /*!
1757 * Get the connector dynamic data. This information should reflects changes to
1758 * the connector over time (e.g. for DisplayPort MST devices).
1759 */
QueryConnectorDynamicData(struct NvKmsPerOpen * pOpen,void * pParamsVoid)1760 static NvBool QueryConnectorDynamicData(struct NvKmsPerOpen *pOpen,
1761 void *pParamsVoid)
1762 {
1763 struct NvKmsQueryConnectorDynamicDataParams *pParams = pParamsVoid;
1764 struct NvKmsPerOpenConnector *pOpenConnector;
1765 NVConnectorEvoPtr pConnectorEvo;
1766 NVDispEvoPtr pDispEvo;
1767 NVDpyEvoPtr pDpyEvo;
1768
1769 pOpenConnector = GetPerOpenConnector(pOpen,
1770 pParams->request.deviceHandle,
1771 pParams->request.dispHandle,
1772 pParams->request.connectorHandle);
1773 if (pOpenConnector == NULL) {
1774 return FALSE;
1775 }
1776
1777 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
1778
1779 pConnectorEvo = pOpenConnector->pConnectorEvo;
1780 pDispEvo = pConnectorEvo->pDispEvo;
1781
1782 if (nvConnectorUsesDPLib(pConnectorEvo)) {
1783 pParams->reply.detectComplete = pConnectorEvo->detectComplete;
1784 } else {
1785 pParams->reply.detectComplete = TRUE;
1786 }
1787
1788 // Find the dynamic dpys on this connector.
1789 pParams->reply.dynamicDpyIdList = nvEmptyDpyIdList();
1790 FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->dynamicDpyIds, pDispEvo) {
1791 if (pDpyEvo->pConnectorEvo == pConnectorEvo) {
1792 pParams->reply.dynamicDpyIdList =
1793 nvAddDpyIdToDpyIdList(pDpyEvo->id,
1794 pParams->reply.dynamicDpyIdList);
1795 }
1796 }
1797
1798 return TRUE;
1799 }
1800
1801
1802 /*!
1803 * Get the static data for the specified dpy. This information should
1804 * remain static for the lifetime of the dpy.
1805 */
QueryDpyStaticData(struct NvKmsPerOpen * pOpen,void * pParamsVoid)1806 static NvBool QueryDpyStaticData(struct NvKmsPerOpen *pOpen,
1807 void *pParamsVoid)
1808 {
1809 struct NvKmsQueryDpyStaticDataParams *pParams = pParamsVoid;
1810 NVDpyEvoPtr pDpyEvo;
1811
1812 pDpyEvo = GetPerOpenDpy(pOpen,
1813 pParams->request.deviceHandle,
1814 pParams->request.dispHandle,
1815 pParams->request.dpyId);
1816 if (pDpyEvo == NULL) {
1817 return FALSE;
1818 }
1819
1820 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
1821
1822 pParams->reply.connectorHandle =
1823 ConnectorEvoToConnectorHandle(pOpen,
1824 pParams->request.deviceHandle,
1825 pParams->request.dispHandle,
1826 pDpyEvo->pConnectorEvo);
1827 /*
1828 * All pConnectorEvos should have corresponding pOpenConnectors,
1829 * so we should always be able to find the NvKmsConnectorHandle.
1830 */
1831 nvAssert(pParams->reply.connectorHandle != 0);
1832
1833 pParams->reply.type = pDpyEvo->pConnectorEvo->legacyType;
1834
1835 if (pDpyEvo->dp.addressString != NULL) {
1836 const size_t len = nvkms_strlen(pDpyEvo->dp.addressString) + 1;
1837 nvkms_memcpy(pParams->reply.dpAddress, pDpyEvo->dp.addressString,
1838 NV_MIN(sizeof(pParams->reply.dpAddress), len));
1839 pParams->reply.dpAddress[sizeof(pParams->reply.dpAddress) - 1] = '\0';
1840 }
1841
1842 pParams->reply.mobileInternal = pDpyEvo->internal;
1843 pParams->reply.isDpMST = nvDpyEvoIsDPMST(pDpyEvo);
1844 pParams->reply.headMask = nvDpyGetPossibleApiHeadsMask(pDpyEvo);
1845
1846 return TRUE;
1847 }
1848
1849
1850 /*!
1851 * Get the dynamic data for the specified dpy. This information can
1852 * change when a hotplug occurs.
1853 */
QueryDpyDynamicData(struct NvKmsPerOpen * pOpen,void * pParamsVoid)1854 static NvBool QueryDpyDynamicData(struct NvKmsPerOpen *pOpen,
1855 void *pParamsVoid)
1856 {
1857 struct NvKmsQueryDpyDynamicDataParams *pParams = pParamsVoid;
1858 NVDpyEvoPtr pDpyEvo;
1859
1860 pDpyEvo = GetPerOpenDpy(pOpen,
1861 pParams->request.deviceHandle,
1862 pParams->request.dispHandle,
1863 pParams->request.dpyId);
1864 if (pDpyEvo == NULL) {
1865 return FALSE;
1866 }
1867
1868 return nvDpyGetDynamicData(pDpyEvo, pParams);
1869 }
1870
/*
 * Store a copy of the user's infoString pointer, so we can copy out to it when
 * we're done.
 */
struct InfoStringExtraUserStateCommon
{
    NvU64 userInfoString; /* user-space pointer saved across the ioctl */
};
1877
1878 /*
1879 * Allocate a kernel buffer to populate the infoString which will be copied out
1880 * to userspace upon completion.
1881 */
InfoStringPrepUserCommon(NvU32 infoStringSize,NvU64 * ppInfoString,struct InfoStringExtraUserStateCommon * pExtra)1882 static NvBool InfoStringPrepUserCommon(
1883 NvU32 infoStringSize,
1884 NvU64 *ppInfoString,
1885 struct InfoStringExtraUserStateCommon *pExtra)
1886 {
1887 char *kernelInfoString = NULL;
1888
1889 if (infoStringSize == 0) {
1890 *ppInfoString = 0;
1891 return TRUE;
1892 }
1893
1894 if (!nvKmsNvU64AddressIsSafe(*ppInfoString)) {
1895 return FALSE;
1896 }
1897
1898 if (infoStringSize > NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH) {
1899 return FALSE;
1900 }
1901
1902 kernelInfoString = nvCalloc(1, infoStringSize);
1903 if (kernelInfoString == NULL) {
1904 return FALSE;
1905 }
1906
1907 pExtra->userInfoString = *ppInfoString;
1908 *ppInfoString = nvKmsPointerToNvU64(kernelInfoString);
1909
1910 return TRUE;
1911 }
1912
1913 /*
1914 * Copy the infoString out to userspace and free the kernel-internal buffer.
1915 */
InfoStringDoneUserCommon(NvU32 infoStringSize,NvU64 pInfoString,NvU32 * infoStringLenWritten,struct InfoStringExtraUserStateCommon * pExtra)1916 static NvBool InfoStringDoneUserCommon(
1917 NvU32 infoStringSize,
1918 NvU64 pInfoString,
1919 NvU32 *infoStringLenWritten,
1920 struct InfoStringExtraUserStateCommon *pExtra)
1921 {
1922 char *kernelInfoString = nvKmsNvU64ToPointer(pInfoString);
1923 int status;
1924 NvBool ret;
1925
1926 if ((infoStringSize == 0) || (*infoStringLenWritten == 0)) {
1927 ret = TRUE;
1928 goto done;
1929 }
1930
1931 nvAssert(*infoStringLenWritten <= infoStringSize);
1932
1933 status = nvkms_copyout(pExtra->userInfoString,
1934 kernelInfoString,
1935 *infoStringLenWritten);
1936 if (status == 0) {
1937 ret = TRUE;
1938 } else {
1939 ret = FALSE;
1940 *infoStringLenWritten = 0;
1941 }
1942
1943 done:
1944 nvFree(kernelInfoString);
1945
1946 return ret;
1947 }
1948
/* Extra user state for NVKMS_IOCTL_VALIDATE_MODE_INDEX: carries the saved
 * user-space infoString pointer between prep and done. */
struct NvKmsValidateModeIndexExtraUserState
{
    struct InfoStringExtraUserStateCommon common;
};
1953
ValidateModeIndexPrepUser(void * pParamsVoid,void * pExtraUserStateVoid)1954 static NvBool ValidateModeIndexPrepUser(
1955 void *pParamsVoid,
1956 void *pExtraUserStateVoid)
1957 {
1958 struct NvKmsValidateModeIndexParams *pParams = pParamsVoid;
1959 struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid;
1960
1961 return InfoStringPrepUserCommon(
1962 pParams->request.infoStringSize,
1963 &pParams->request.pInfoString,
1964 &pExtra->common);
1965 }
1966
ValidateModeIndexDoneUser(void * pParamsVoid,void * pExtraUserStateVoid)1967 static NvBool ValidateModeIndexDoneUser(
1968 void *pParamsVoid,
1969 void *pExtraUserStateVoid)
1970 {
1971 struct NvKmsValidateModeIndexParams *pParams = pParamsVoid;
1972 struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid;
1973
1974 return InfoStringDoneUserCommon(
1975 pParams->request.infoStringSize,
1976 pParams->request.pInfoString,
1977 &pParams->reply.infoStringLenWritten,
1978 &pExtra->common);
1979 }
1980
1981 /*!
1982 * Validate the requested mode.
1983 */
ValidateModeIndex(struct NvKmsPerOpen * pOpen,void * pParamsVoid)1984 static NvBool ValidateModeIndex(struct NvKmsPerOpen *pOpen,
1985 void *pParamsVoid)
1986 {
1987 struct NvKmsValidateModeIndexParams *pParams = pParamsVoid;
1988 NVDpyEvoPtr pDpyEvo;
1989
1990 pDpyEvo = GetPerOpenDpy(pOpen,
1991 pParams->request.deviceHandle,
1992 pParams->request.dispHandle,
1993 pParams->request.dpyId);
1994 if (pDpyEvo == NULL) {
1995 return FALSE;
1996 }
1997
1998 nvValidateModeIndex(pDpyEvo, &pParams->request, &pParams->reply);
1999
2000 return TRUE;
2001 }
2002
/* Extra user state for NVKMS_IOCTL_VALIDATE_MODE: carries the saved
 * user-space infoString pointer between prep and done. */
struct NvKmsValidateModeExtraUserState
{
    struct InfoStringExtraUserStateCommon common;
};
2007
ValidateModePrepUser(void * pParamsVoid,void * pExtraUserStateVoid)2008 static NvBool ValidateModePrepUser(
2009 void *pParamsVoid,
2010 void *pExtraUserStateVoid)
2011 {
2012 struct NvKmsValidateModeParams *pParams = pParamsVoid;
2013 struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid;
2014
2015 return InfoStringPrepUserCommon(
2016 pParams->request.infoStringSize,
2017 &pParams->request.pInfoString,
2018 &pExtra->common);
2019 }
2020
ValidateModeDoneUser(void * pParamsVoid,void * pExtraUserStateVoid)2021 static NvBool ValidateModeDoneUser(
2022 void *pParamsVoid,
2023 void *pExtraUserStateVoid)
2024 {
2025 struct NvKmsValidateModeParams *pParams = pParamsVoid;
2026 struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid;
2027
2028 return InfoStringDoneUserCommon(
2029 pParams->request.infoStringSize,
2030 pParams->request.pInfoString,
2031 &pParams->reply.infoStringLenWritten,
2032 &pExtra->common);
2033 }
2034
2035 /*!
2036 * Validate the requested mode.
2037 */
ValidateMode(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2038 static NvBool ValidateMode(struct NvKmsPerOpen *pOpen,
2039 void *pParamsVoid)
2040 {
2041 struct NvKmsValidateModeParams *pParams = pParamsVoid;
2042 NVDpyEvoPtr pDpyEvo;
2043
2044 pDpyEvo = GetPerOpenDpy(pOpen,
2045 pParams->request.deviceHandle,
2046 pParams->request.dispHandle,
2047 pParams->request.dpyId);
2048 if (pDpyEvo == NULL) {
2049 return FALSE;
2050 }
2051
2052 nvValidateModeEvo(pDpyEvo, &pParams->request, &pParams->reply);
2053
2054 return TRUE;
2055 }
2056
2057 static NvBool
CopyInOneLut(NvU64 pRampsUser,struct NvKmsLutRamps ** ppRampsKernel)2058 CopyInOneLut(NvU64 pRampsUser, struct NvKmsLutRamps **ppRampsKernel)
2059 {
2060 struct NvKmsLutRamps *pRampsKernel = NULL;
2061 int status;
2062
2063 if (pRampsUser == 0) {
2064 return TRUE;
2065 }
2066
2067 if (!nvKmsNvU64AddressIsSafe(pRampsUser)) {
2068 return FALSE;
2069 }
2070
2071 pRampsKernel = nvAlloc(sizeof(*pRampsKernel));
2072 if (!pRampsKernel) {
2073 return FALSE;
2074 }
2075
2076 status = nvkms_copyin((char *)pRampsKernel, pRampsUser,
2077 sizeof(*pRampsKernel));
2078 if (status != 0) {
2079 nvFree(pRampsKernel);
2080 return FALSE;
2081 }
2082
2083 *ppRampsKernel = pRampsKernel;
2084
2085 return TRUE;
2086 }
2087
2088 static NvBool
CopyInLutParams(struct NvKmsSetLutCommonParams * pCommonLutParams)2089 CopyInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams)
2090 {
2091 struct NvKmsLutRamps *pInputRamps = NULL;
2092 struct NvKmsLutRamps *pOutputRamps = NULL;
2093
2094 if (!CopyInOneLut(pCommonLutParams->input.pRamps, &pInputRamps)) {
2095 goto fail;
2096 }
2097 if (!CopyInOneLut(pCommonLutParams->output.pRamps, &pOutputRamps)) {
2098 goto fail;
2099 }
2100
2101 pCommonLutParams->input.pRamps = nvKmsPointerToNvU64(pInputRamps);
2102 pCommonLutParams->output.pRamps = nvKmsPointerToNvU64(pOutputRamps);
2103
2104 return TRUE;
2105
2106 fail:
2107 nvFree(pInputRamps);
2108 nvFree(pOutputRamps);
2109 return FALSE;
2110 }
2111
2112 static void
FreeCopiedInLutParams(struct NvKmsSetLutCommonParams * pCommonLutParams)2113 FreeCopiedInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams)
2114 {
2115 struct NvKmsLutRamps *pInputRamps =
2116 nvKmsNvU64ToPointer(pCommonLutParams->input.pRamps);
2117 struct NvKmsLutRamps *pOutputRamps =
2118 nvKmsNvU64ToPointer(pCommonLutParams->output.pRamps);
2119
2120 nvFree(pInputRamps);
2121 nvFree(pOutputRamps);
2122 }
2123
/* No extra user state needed for SetMode; although we lose the user pointers
 * for the LUT ramps after copying them in, that's okay because we don't need
 * to copy them back out again. */
struct NvKmsSetModeExtraUserState
{
    /* Intentionally empty; see comment above. */
};
2130
2131 /*!
2132 * Copy in any data referenced by pointer for the SetMode request. Currently
2133 * this is only the LUT ramps.
2134 */
SetModePrepUser(void * pParamsVoid,void * pExtraUserStateVoid)2135 static NvBool SetModePrepUser(
2136 void *pParamsVoid,
2137 void *pExtraUserStateVoid)
2138 {
2139 struct NvKmsSetModeParams *pParams = pParamsVoid;
2140 struct NvKmsSetModeRequest *pReq = &pParams->request;
2141 NvU32 disp, apiHead, dispFailed, apiHeadFailed;
2142
2143 /* Iterate over all of the common LUT ramp pointers embedded in the SetMode
2144 * request, and copy in each one. */
2145 for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) {
2146 for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) {
2147 struct NvKmsSetLutCommonParams *pCommonLutParams =
2148 &pReq->disp[disp].head[apiHead].flip.lut;
2149
2150 if (!CopyInLutParams(pCommonLutParams)) {
2151 /* Remember how far we got through these loops before we
2152 * failed, so that we can undo everything up to this point. */
2153 dispFailed = disp;
2154 apiHeadFailed = apiHead;
2155 goto fail;
2156 }
2157 }
2158 }
2159
2160 return TRUE;
2161
2162 fail:
2163 for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) {
2164 for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) {
2165 struct NvKmsSetLutCommonParams *pCommonLutParams =
2166 &pReq->disp[disp].head[apiHead].flip.lut;
2167
2168 if (disp > dispFailed ||
2169 (disp == dispFailed && apiHead >= apiHeadFailed)) {
2170 break;
2171 }
2172
2173 FreeCopiedInLutParams(pCommonLutParams);
2174 }
2175 }
2176
2177 return FALSE;
2178 }
2179
2180 /*!
2181 * Free buffers allocated in SetModePrepUser.
2182 */
SetModeDoneUser(void * pParamsVoid,void * pExtraUserStateVoid)2183 static NvBool SetModeDoneUser(
2184 void *pParamsVoid,
2185 void *pExtraUserStateVoid)
2186 {
2187 struct NvKmsSetModeParams *pParams = pParamsVoid;
2188 struct NvKmsSetModeRequest *pReq = &pParams->request;
2189 NvU32 disp, apiHead;
2190
2191 for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) {
2192 for (apiHead = 0; apiHead < ARRAY_LEN(pReq->disp[disp].head); apiHead++) {
2193 struct NvKmsSetLutCommonParams *pCommonLutParams =
2194 &pReq->disp[disp].head[apiHead].flip.lut;
2195
2196 FreeCopiedInLutParams(pCommonLutParams);
2197 }
2198 }
2199
2200 return TRUE;
2201 }
2202
2203 /*!
2204 * Perform a modeset on the device.
2205 */
SetMode(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2206 static NvBool SetMode(struct NvKmsPerOpen *pOpen,
2207 void *pParamsVoid)
2208 {
2209 struct NvKmsSetModeParams *pParams = pParamsVoid;
2210 struct NvKmsPerOpenDev *pOpenDev;
2211
2212 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
2213
2214 if (pOpenDev == NULL) {
2215 return FALSE;
2216 }
2217
2218 return nvSetDispModeEvo(pOpenDev->pDevEvo, pOpenDev,
2219 &pParams->request, &pParams->reply,
2220 FALSE /* bypassComposition */,
2221 TRUE /* doRasterLock */);
2222 }
2223
2224 /*!
2225 * Set the cursor image.
2226 */
SetCursorImage(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2227 static NvBool SetCursorImage(struct NvKmsPerOpen *pOpen,
2228 void *pParamsVoid)
2229 {
2230 struct NvKmsSetCursorImageParams *pParams = pParamsVoid;
2231 struct NvKmsPerOpenDev *pOpenDev;
2232 struct NvKmsPerOpenDisp *pOpenDisp;
2233 NVDispEvoPtr pDispEvo;
2234
2235 if (!GetPerOpenDevAndDisp(pOpen,
2236 pParams->request.deviceHandle,
2237 pParams->request.dispHandle,
2238 &pOpenDev,
2239 &pOpenDisp)) {
2240 return FALSE;
2241 }
2242
2243 pDispEvo = pOpenDisp->pDispEvo;
2244
2245 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) {
2246 return FALSE;
2247 }
2248
2249 return nvHsIoctlSetCursorImage(pDispEvo,
2250 pOpenDev,
2251 &pOpenDev->surfaceHandles,
2252 pParams->request.head,
2253 &pParams->request.common);
2254 }
2255
2256 /*!
2257 * Change the cursor position.
2258 */
MoveCursor(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2259 static NvBool MoveCursor(struct NvKmsPerOpen *pOpen,
2260 void *pParamsVoid)
2261 {
2262 struct NvKmsMoveCursorParams *pParams = pParamsVoid;
2263 struct NvKmsPerOpenDisp *pOpenDisp;
2264 NVDispEvoPtr pDispEvo;
2265
2266 pOpenDisp = GetPerOpenDisp(pOpen,
2267 pParams->request.deviceHandle,
2268 pParams->request.dispHandle);
2269 if (pOpenDisp == NULL) {
2270 return FALSE;
2271 }
2272
2273 pDispEvo = pOpenDisp->pDispEvo;
2274
2275 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) {
2276 return FALSE;
2277 }
2278
2279 return nvHsIoctlMoveCursor(pDispEvo,
2280 pParams->request.head,
2281 &pParams->request.common);
2282 }
2283
/* No extra user state needed for SetLut; although we lose the user pointers
 * for the LUT ramps after copying them in, that's okay because we don't need
 * to copy them back out again. */
struct NvKmsSetLutExtraUserState
{
    /* Intentionally empty; see comment above. */
};
2290
2291 /*!
2292 * Copy in any data referenced by pointer for the SetLut request. Currently
2293 * this is only the LUT ramps.
2294 */
SetLutPrepUser(void * pParamsVoid,void * pExtraUserStateVoid)2295 static NvBool SetLutPrepUser(
2296 void *pParamsVoid,
2297 void *pExtraUserStateVoid)
2298 {
2299 struct NvKmsSetLutParams *pParams = pParamsVoid;
2300 struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common;
2301
2302 return CopyInLutParams(pCommonLutParams);
2303 }
2304
2305 /*!
2306 * Free buffers allocated in SetLutPrepUser.
2307 */
SetLutDoneUser(void * pParamsVoid,void * pExtraUserStateVoid)2308 static NvBool SetLutDoneUser(
2309 void *pParamsVoid,
2310 void *pExtraUserStateVoid)
2311 {
2312 struct NvKmsSetLutParams *pParams = pParamsVoid;
2313 struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common;
2314
2315 FreeCopiedInLutParams(pCommonLutParams);
2316
2317 return TRUE;
2318 }
2319
2320 /*!
2321 * Set the LUT on the specified head.
2322 */
SetLut(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2323 static NvBool SetLut(struct NvKmsPerOpen *pOpen,
2324 void *pParamsVoid)
2325 {
2326 struct NvKmsSetLutParams *pParams = pParamsVoid;
2327 struct NvKmsPerOpenDisp *pOpenDisp;
2328 NVDispEvoPtr pDispEvo;
2329
2330 pOpenDisp = GetPerOpenDisp(pOpen,
2331 pParams->request.deviceHandle,
2332 pParams->request.dispHandle);
2333 if (pOpenDisp == NULL) {
2334 return FALSE;
2335 }
2336
2337 pDispEvo = pOpenDisp->pDispEvo;
2338
2339 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) {
2340 return FALSE;
2341 }
2342
2343 if (!nvValidateSetLutCommonParams(pDispEvo->pDevEvo,
2344 &pParams->request.common)) {
2345 return FALSE;
2346 }
2347
2348 nvEvoSetLut(pDispEvo,
2349 pParams->request.head, TRUE /* kickoff */,
2350 &pParams->request.common);
2351
2352 return TRUE;
2353 }
2354
2355
2356 /*!
2357 * Return whether the specified head is idle.
2358 */
IdleMainLayerChannelCheckIdleOneApiHead(NVDispEvoPtr pDispEvo,NvU32 apiHead)2359 static NvBool IdleMainLayerChannelCheckIdleOneApiHead(
2360 NVDispEvoPtr pDispEvo,
2361 NvU32 apiHead)
2362 {
2363 if (pDispEvo->pHsChannel[apiHead] != NULL) {
2364 return nvHsIdleFlipQueue(pDispEvo->pHsChannel[apiHead],
2365 FALSE /* force */);
2366 }
2367 return nvIdleMainLayerChannelCheckIdleOneApiHead(pDispEvo, apiHead);
2368 }
2369
2370 /*!
2371 * Return whether all heads described in pRequest are idle.
2372 *
2373 * Note that we loop over all requested heads, rather than return FALSE once we
2374 * find the first non-idle head, because checking for idle has side effects: in
2375 * headSurface, checking for idle gives the headSurface flip queue the
2376 * opportunity to proceed another frame.
2377 */
IdleBaseChannelCheckIdle(NVDevEvoPtr pDevEvo,const struct NvKmsIdleBaseChannelRequest * pRequest,struct NvKmsIdleBaseChannelReply * pReply)2378 static NvBool IdleBaseChannelCheckIdle(
2379 NVDevEvoPtr pDevEvo,
2380 const struct NvKmsIdleBaseChannelRequest *pRequest,
2381 struct NvKmsIdleBaseChannelReply *pReply)
2382 {
2383 NvU32 apiHead, sd;
2384 NVDispEvoPtr pDispEvo;
2385 NvBool allIdle = TRUE;
2386
2387 FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
2388
2389 for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
2390
2391 NvBool idle;
2392
2393 if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
2394 continue;
2395 }
2396
2397 if ((pRequest->subDevicesPerHead[apiHead] & NVBIT(sd)) == 0) {
2398 continue;
2399 }
2400
2401 idle = IdleMainLayerChannelCheckIdleOneApiHead(pDispEvo, apiHead);
2402
2403 if (!idle) {
2404 pReply->stopSubDevicesPerHead[apiHead] |= NVBIT(sd);
2405 }
2406 allIdle = allIdle && idle;
2407 }
2408 }
2409
2410 return allIdle;
2411 }
2412
2413 /*!
2414 * Idle all requested heads.
2415 *
2416 * First, wait for the heads to idle naturally. If a timeout is exceeded, then
2417 * force the non-idle heads to idle, and record these in pReply.
2418 */
IdleBaseChannelAll(NVDevEvoPtr pDevEvo,const struct NvKmsIdleBaseChannelRequest * pRequest,struct NvKmsIdleBaseChannelReply * pReply)2419 static NvBool IdleBaseChannelAll(
2420 NVDevEvoPtr pDevEvo,
2421 const struct NvKmsIdleBaseChannelRequest *pRequest,
2422 struct NvKmsIdleBaseChannelReply *pReply)
2423 {
2424 NvU64 startTime = 0;
2425
2426 /*
2427 * Each element in subDevicesPerHead[] must be large enough to hold one bit
2428 * per subdevice.
2429 */
2430 ct_assert(NVKMS_MAX_SUBDEVICES <=
2431 (sizeof(pRequest->subDevicesPerHead[0]) * 8));
2432
2433 /* Loop until all head,sd pairs are idle, or we time out. */
2434 do {
2435 const NvU32 timeout = 2000000; /* 2 seconds */
2436
2437
2438 /*
2439 * Clear the pReply data,
2440 * IdleBaseChannelCheckIdle() will fill it afresh.
2441 */
2442 nvkms_memset(pReply, 0, sizeof(*pReply));
2443
2444 /* If all heads are idle, we are done. */
2445 if (IdleBaseChannelCheckIdle(pDevEvo, pRequest, pReply)) {
2446 return TRUE;
2447 }
2448
2449 /* Break out of the loop if we exceed the timeout. */
2450 if (nvExceedsTimeoutUSec(pDevEvo, &startTime, timeout)) {
2451 break;
2452 }
2453
2454 /* At least one head is not idle; yield, and try again. */
2455 nvkms_yield();
2456
2457 } while (TRUE);
2458
2459 return TRUE;
2460 }
2461
2462
2463 /*!
2464 * Wait for the requested base channels to be idle, returning whether
2465 * stopping the base channels was necessary.
2466 */
IdleBaseChannel(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2467 static NvBool IdleBaseChannel(struct NvKmsPerOpen *pOpen,
2468 void *pParamsVoid)
2469 {
2470 struct NvKmsIdleBaseChannelParams *pParams = pParamsVoid;
2471 struct NvKmsPerOpenDev *pOpenDev;
2472
2473 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
2474
2475 if (pOpenDev == NULL) {
2476 return FALSE;
2477 }
2478
2479 /* Only a modeset owner can idle base. */
2480
2481 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
2482 return FALSE;
2483 }
2484
2485 return IdleBaseChannelAll(pOpenDev->pDevEvo,
2486 &pParams->request, &pParams->reply);
2487 }
2488
2489
/* No extra user state needed for Flip; although we lose the user pointers
 * for the LUT ramps after copying them in, that's okay because we don't need
 * to copy them back out again. */
struct NvKmsFlipExtraUserState
{
    // Nothing needed; FlipPrepUser()/FlipDoneUser() track everything
    // through the request itself.
};
2497
2498 /*!
2499 * Copy in any data referenced by pointer for the Flip request. Currently
2500 * this is the flip head request array and the LUT ramps.
2501 */
FlipPrepUser(void * pParamsVoid,void * pExtraUserStateVoid)2502 static NvBool FlipPrepUser(
2503 void *pParamsVoid,
2504 void *pExtraUserStateVoid)
2505 {
2506 struct NvKmsFlipParams *pParams = pParamsVoid;
2507 struct NvKmsFlipRequest *pRequest = &pParams->request;
2508 struct NvKmsFlipRequestOneHead *pFlipHeadKernel = NULL;
2509 NvU64 pFlipHeadUser = pRequest->pFlipHead;
2510 size_t size;
2511 NvU32 apiHead, apiHeadFailed;
2512 int status;
2513
2514 if (!nvKmsNvU64AddressIsSafe(pFlipHeadUser)) {
2515 return FALSE;
2516 }
2517
2518 if (pRequest->numFlipHeads <= 0 ||
2519 pRequest->numFlipHeads > NV_MAX_FLIP_REQUEST_HEADS) {
2520 return FALSE;
2521 }
2522
2523 size = sizeof(*pFlipHeadKernel) * pRequest->numFlipHeads;
2524 pFlipHeadKernel = nvAlloc(size);
2525 if (!pFlipHeadKernel) {
2526 return FALSE;
2527 }
2528
2529 status = nvkms_copyin((char *)pFlipHeadKernel, pFlipHeadUser, size);
2530 if (status != 0) {
2531 nvFree(pFlipHeadKernel);
2532 return FALSE;
2533 }
2534
2535 /* Iterate over all of the common LUT ramp pointers embedded in the Flip
2536 * request, and copy in each one. */
2537 for (apiHead = 0; apiHead < pRequest->numFlipHeads; apiHead++) {
2538 struct NvKmsSetLutCommonParams *pCommonLutParams =
2539 &pFlipHeadKernel[apiHead].flip.lut;
2540
2541 if (!CopyInLutParams(pCommonLutParams)) {
2542 /* Remember how far we got through this loop before we
2543 * failed, so that we can undo everything up to this point. */
2544 apiHeadFailed = apiHead;
2545 goto fail_lut;
2546 }
2547 }
2548
2549 pRequest->pFlipHead = nvKmsPointerToNvU64(pFlipHeadKernel);
2550
2551 return TRUE;
2552
2553 fail_lut:
2554 for (apiHead = 0; apiHead < apiHeadFailed; apiHead++) {
2555 struct NvKmsSetLutCommonParams *pCommonLutParams =
2556 &pFlipHeadKernel[apiHead].flip.lut;
2557
2558 FreeCopiedInLutParams(pCommonLutParams);
2559 }
2560 nvFree(pFlipHeadKernel);
2561 return FALSE;
2562 }
2563
2564 /*!
2565 * Free buffers allocated in FlipPrepUser.
2566 */
FlipDoneUser(void * pParamsVoid,void * pExtraUserStateVoid)2567 static NvBool FlipDoneUser(
2568 void *pParamsVoid,
2569 void *pExtraUserStateVoid)
2570 {
2571 struct NvKmsFlipParams *pParams = pParamsVoid;
2572 struct NvKmsFlipRequest *pRequest = &pParams->request;
2573 struct NvKmsFlipRequestOneHead *pFlipHead = nvKmsNvU64ToPointer(pRequest->pFlipHead);
2574 NvU32 apiHead;
2575
2576 for (apiHead = 0; apiHead < pRequest->numFlipHeads; apiHead++) {
2577 struct NvKmsSetLutCommonParams *pCommonLutParams =
2578 &pFlipHead[apiHead].flip.lut;
2579
2580 FreeCopiedInLutParams(pCommonLutParams);
2581 }
2582 nvFree(pFlipHead);
2583 /* The request is not copied back out to userspace (only the reply is), so
2584 * we don't need to worry about restoring the user pointer */
2585 pRequest->pFlipHead = 0;
2586
2587 return TRUE;
2588 }
2589
2590 /*!
2591 * For each entry in the array pointed to by 'pFlipHead', of length
2592 * 'numFlipHeads', verify that the sd and head values specified are within
2593 * bounds and that there are no duplicates.
2594 */
ValidateFlipHeads(NVDevEvoPtr pDevEvo,const struct NvKmsFlipRequestOneHead * pFlipHead,NvU32 numFlipHeads)2595 static NvBool ValidateFlipHeads(
2596 NVDevEvoPtr pDevEvo,
2597 const struct NvKmsFlipRequestOneHead *pFlipHead,
2598 NvU32 numFlipHeads)
2599 {
2600 NvU32 i;
2601 ct_assert(NVKMS_MAX_HEADS_PER_DISP <= 8);
2602 NvU8 apiHeadsUsed[NVKMS_MAX_SUBDEVICES] = { };
2603
2604 for (i = 0; i < numFlipHeads; i++) {
2605 const NvU32 sd = pFlipHead[i].sd;
2606 const NvU32 apiHead = pFlipHead[i].head;
2607
2608 if (sd >= pDevEvo->numSubDevices) {
2609 return FALSE;
2610 }
2611 if (apiHead >= pDevEvo->numApiHeads) {
2612 return FALSE;
2613 }
2614 if ((apiHeadsUsed[sd] & (1 << apiHead)) != 0) {
2615 return FALSE;
2616 }
2617 apiHeadsUsed[sd] |= (1 << apiHead);
2618 }
2619
2620 return TRUE;
2621 }
2622
2623 /*!
2624 * Flip the specified head.
2625 */
Flip(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2626 static NvBool Flip(struct NvKmsPerOpen *pOpen,
2627 void *pParamsVoid)
2628 {
2629 struct NvKmsFlipParams *pParams = pParamsVoid;
2630 struct NvKmsPerOpenDev *pOpenDev;
2631 NVDevEvoPtr pDevEvo = NULL;
2632 const struct NvKmsFlipRequest *pRequest = &pParams->request;
2633 const struct NvKmsFlipRequestOneHead *pFlipHead =
2634 nvKmsNvU64ToPointer(pRequest->pFlipHead);
2635 const NvU32 numFlipHeads = pRequest->numFlipHeads;
2636
2637 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
2638
2639 if (pOpenDev == NULL) {
2640 return FALSE;
2641 }
2642
2643 pDevEvo = pOpenDev->pDevEvo;
2644
2645 if (!ValidateFlipHeads(pDevEvo, pFlipHead, numFlipHeads)) {
2646 return FALSE;
2647 }
2648
2649 return nvHsIoctlFlip(pDevEvo, pOpenDev,
2650 pFlipHead, numFlipHeads,
2651 pRequest->commit, pRequest->allowVrr,
2652 &pParams->reply);
2653 }
2654
2655
/*!
 * Record whether this client is interested in the specified dynamic
 * dpy.
 */
static NvBool DeclareDynamicDpyInterest(struct NvKmsPerOpen *pOpen,
                                        void *pParamsVoid)
{
    /* XXX NVKMS TODO: implement me. */

    /* Stub: reports success but does not record any interest yet. */
    return TRUE;
}
2667
2668
2669 /*!
2670 * Register a surface with the specified per-open + device.
2671 */
RegisterSurface(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2672 static NvBool RegisterSurface(struct NvKmsPerOpen *pOpen,
2673 void *pParamsVoid)
2674 {
2675 struct NvKmsRegisterSurfaceParams *pParams = pParamsVoid;
2676 struct NvKmsPerOpenDev *pOpenDev;
2677
2678 /*
2679 * Only allow userspace clients to specify memory objects by FD.
2680 * This prevents clients from specifying (hClient, hObject) tuples that
2681 * really belong to other clients.
2682 */
2683 if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE &&
2684 !pParams->request.useFd) {
2685 return FALSE;
2686 }
2687
2688 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
2689
2690 if (pOpenDev == NULL) {
2691 return FALSE;
2692 }
2693
2694 nvEvoRegisterSurface(pOpenDev->pDevEvo, pOpenDev, pParams,
2695 NvHsMapPermissionsReadOnly);
2696 return TRUE;
2697 }
2698
2699
2700 /*!
2701 * Unregister a surface from the specified per-open + device.
2702 */
UnregisterSurface(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2703 static NvBool UnregisterSurface(struct NvKmsPerOpen *pOpen,
2704 void *pParamsVoid)
2705 {
2706 struct NvKmsUnregisterSurfaceParams *pParams = pParamsVoid;
2707 struct NvKmsPerOpenDev *pOpenDev;
2708
2709 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
2710
2711 if (pOpenDev == NULL) {
2712 return FALSE;
2713 }
2714
2715 nvEvoUnregisterSurface(pOpenDev->pDevEvo, pOpenDev,
2716 pParams->request.surfaceHandle,
2717 FALSE /* skipUpdate */);
2718 return TRUE;
2719 }
2720
2721
2722 /*!
2723 * Associate a surface with the NvKmsPerOpen specified by
2724 * NvKmsGrantSurfaceParams::request::fd.
2725 */
GrantSurface(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2726 static NvBool GrantSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
2727 {
2728 struct NvKmsGrantSurfaceParams *pParams = pParamsVoid;
2729 struct NvKmsPerOpenDev *pOpenDev;
2730 NVSurfaceEvoPtr pSurfaceEvo;
2731 struct NvKmsPerOpen *pOpenFd;
2732
2733 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
2734
2735 if (pOpenDev == NULL) {
2736 return FALSE;
2737 }
2738
2739 pSurfaceEvo =
2740 nvEvoGetSurfaceFromHandleNoDispHWAccessOk(pOpenDev->pDevEvo,
2741 &pOpenDev->surfaceHandles,
2742 pParams->request.surfaceHandle);
2743 if (pSurfaceEvo == NULL) {
2744 return FALSE;
2745 }
2746
2747 if (nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)) {
2748 return FALSE;
2749 }
2750
2751 /* Only the owner of the surface can grant it to other clients. */
2752
2753 if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev,
2754 pParams->request.surfaceHandle)) {
2755 return FALSE;
2756 }
2757
2758 pOpenFd = nvkms_get_per_open_data(pParams->request.fd);
2759
2760 if (pOpenFd == NULL) {
2761 return FALSE;
2762 }
2763
2764 if (!AssignNvKmsPerOpenType(
2765 pOpenFd, NvKmsPerOpenTypeGrantSurface, FALSE)) {
2766 return FALSE;
2767 }
2768
2769 nvEvoIncrementSurfaceStructRefCnt(pSurfaceEvo);
2770 pOpenFd->grantSurface.pSurfaceEvo = pSurfaceEvo;
2771
2772 return TRUE;
2773 }
2774
2775
2776 /*!
2777 * Retrieve the surface and device associated with
2778 * NvKmsAcquireSurfaceParams::request::fd, and give the client an
2779 * NvKmsSurfaceHandle to the surface.
2780 */
AcquireSurface(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2781 static NvBool AcquireSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
2782 {
2783 struct NvKmsAcquireSurfaceParams *pParams = pParamsVoid;
2784 struct NvKmsPerOpen *pOpenFd;
2785 struct NvKmsPerOpenDev *pOpenDev;
2786 NvKmsSurfaceHandle surfaceHandle = 0;
2787
2788 pOpenFd = nvkms_get_per_open_data(pParams->request.fd);
2789
2790 if (pOpenFd == NULL) {
2791 return FALSE;
2792 }
2793
2794 if (pOpenFd->type != NvKmsPerOpenTypeGrantSurface) {
2795 return FALSE;
2796 }
2797
2798 nvAssert(pOpenFd->grantSurface.pSurfaceEvo != NULL);
2799
2800 if (pOpenFd->grantSurface.pSurfaceEvo->rmRefCnt == 0) { /* orphan */
2801 return FALSE;
2802 }
2803
2804 if (nvEvoSurfaceRefCntsTooLarge(pOpenFd->grantSurface.pSurfaceEvo)) {
2805 return FALSE;
2806 }
2807
2808 /* Since the surface isn't orphaned, it should have an owner, with a
2809 * pOpenDev and a pDevEvo. Get the pOpenDev for the acquiring client that
2810 * matches the owner's pDevEvo. */
2811 nvAssert(pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo != NULL);
2812 pOpenDev = DevEvoToOpenDev(pOpen,
2813 pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo);
2814
2815 if (pOpenDev == NULL) {
2816 return FALSE;
2817 }
2818
2819 surfaceHandle =
2820 nvEvoCreateApiHandle(&pOpenDev->surfaceHandles,
2821 pOpenFd->grantSurface.pSurfaceEvo);
2822
2823 if (surfaceHandle == 0) {
2824 return FALSE;
2825 }
2826
2827 nvEvoIncrementSurfaceStructRefCnt(pOpenFd->grantSurface.pSurfaceEvo);
2828
2829 pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle;
2830 pParams->reply.surfaceHandle = surfaceHandle;
2831
2832 return TRUE;
2833 }
2834
ReleaseSurface(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2835 static NvBool ReleaseSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
2836 {
2837 struct NvKmsReleaseSurfaceParams *pParams = pParamsVoid;
2838 struct NvKmsPerOpenDev *pOpenDev;
2839
2840 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
2841
2842 if (pOpenDev == NULL) {
2843 return FALSE;
2844 }
2845
2846 nvEvoReleaseSurface(pOpenDev->pDevEvo, pOpenDev,
2847 pParams->request.surfaceHandle);
2848 return TRUE;
2849 }
2850
2851
2852 /*!
2853 * Associate a swap group with the NvKmsPerOpen specified by
2854 * NvKmsGrantSwapGroupParams::request::fd.
2855 */
GrantSwapGroup(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2856 static NvBool GrantSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
2857 {
2858 struct NvKmsGrantSwapGroupParams *pParams = pParamsVoid;
2859 struct NvKmsPerOpenDev *pOpenDev;
2860 NVSwapGroupRec *pSwapGroup;
2861 struct NvKmsPerOpen *pOpenFd;
2862
2863 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
2864
2865 if (pOpenDev == NULL) {
2866 return FALSE;
2867 }
2868
2869 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
2870 return FALSE;
2871 }
2872
2873 pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles,
2874 pParams->request.swapGroupHandle);
2875
2876 if (pSwapGroup == NULL) {
2877 return FALSE;
2878 }
2879
2880 pOpenFd = nvkms_get_per_open_data(pParams->request.fd);
2881
2882 if (pOpenFd == NULL) {
2883 return FALSE;
2884 }
2885
2886 /*
2887 * Increment the swap group refcnt while granting it so the SwapGroup
2888 * won't be freed out from under the grant fd. To complement this,
2889 * nvKmsClose() on NvKmsPerOpenTypeGrantSwapGroup calls
2890 * DecrementSwapGroupRefCnt().
2891 */
2892 if (!nvHsIncrementSwapGroupRefCnt(pSwapGroup)) {
2893 return FALSE;
2894 }
2895
2896 if (!AssignNvKmsPerOpenType(
2897 pOpenFd, NvKmsPerOpenTypeGrantSwapGroup, FALSE)) {
2898 nvHsDecrementSwapGroupRefCnt(pSwapGroup);
2899 return FALSE;
2900 }
2901
2902 /* we must not fail beyond this point */
2903
2904 pOpenFd->grantSwapGroup.pSwapGroup = pSwapGroup;
2905
2906 pOpenFd->grantSwapGroup.pDevEvo = pOpenDev->pDevEvo;
2907
2908 return TRUE;
2909 }
2910
2911
2912 /*!
2913 * Retrieve the swap group and device associated with
2914 * NvKmsAcquireSwapGroupParams::request::fd, give the client an
2915 * NvKmsSwapGroupHandle to the swap group, and increment the
2916 * swap group's reference count.
2917 */
AcquireSwapGroup(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2918 static NvBool AcquireSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
2919 {
2920 struct NvKmsAcquireSwapGroupParams *pParams = pParamsVoid;
2921 struct NvKmsPerOpen *pOpenFd;
2922 struct NvKmsPerOpenDev *pOpenDev;
2923 NvKmsSwapGroupHandle swapGroupHandle = 0;
2924
2925 pOpenFd = nvkms_get_per_open_data(pParams->request.fd);
2926
2927 if (pOpenFd == NULL) {
2928 return FALSE;
2929 }
2930
2931 if (pOpenFd->type != NvKmsPerOpenTypeGrantSwapGroup) {
2932 return FALSE;
2933 }
2934
2935 /*
2936 * pSwapGroup is only freed when its last reference goes away; if pOpenFd
2937 * hasn't yet been closed, then its reference incremented in
2938 * GrantSwapGroup() couldn't have been decremented in nvKmsClose()
2939 */
2940 nvAssert(pOpenFd->grantSwapGroup.pSwapGroup != NULL);
2941 nvAssert(pOpenFd->grantSwapGroup.pDevEvo != NULL);
2942
2943 if (pOpenFd->grantSwapGroup.pSwapGroup->zombie) {
2944 return FALSE;
2945 }
2946
2947 pOpenDev = DevEvoToOpenDev(pOpen, pOpenFd->grantSwapGroup.pDevEvo);
2948
2949 if (pOpenDev == NULL) {
2950 return FALSE;
2951 }
2952
2953 if (nvEvoApiHandlePointerIsPresent(&pOpenDev->swapGroupHandles,
2954 pOpenFd->grantSwapGroup.pSwapGroup)) {
2955 return FALSE;
2956 }
2957
2958 if (!nvHsIncrementSwapGroupRefCnt(pOpenFd->grantSwapGroup.pSwapGroup)) {
2959 return FALSE;
2960 }
2961
2962 swapGroupHandle =
2963 nvEvoCreateApiHandle(&pOpenDev->swapGroupHandles,
2964 pOpenFd->grantSwapGroup.pSwapGroup);
2965
2966 if (swapGroupHandle == 0) {
2967 nvHsDecrementSwapGroupRefCnt(pOpenFd->grantSwapGroup.pSwapGroup);
2968 return FALSE;
2969 }
2970
2971 /* we must not fail beyond this point */
2972
2973 pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle;
2974 pParams->reply.swapGroupHandle = swapGroupHandle;
2975
2976 return TRUE;
2977 }
2978
2979
2980 /*!
2981 * Free this client's reference to the swap group.
2982 *
2983 * This is meant to be called by clients that have acquired the swap group
2984 * handle through AcquireSwapGroup().
2985 */
ReleaseSwapGroup(struct NvKmsPerOpen * pOpen,void * pParamsVoid)2986 static NvBool ReleaseSwapGroup(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
2987 {
2988 struct NvKmsReleaseSwapGroupParams *pParams = pParamsVoid;
2989 struct NvKmsPerOpenDev *pOpenDev;
2990 NVSwapGroupRec *pSwapGroup;
2991 NvKmsSwapGroupHandle handle = pParams->request.swapGroupHandle;
2992
2993 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
2994
2995 if (pOpenDev == NULL) {
2996 return FALSE;
2997 }
2998
2999 /*
3000 * This may operate on a swap group that has already been freed
3001 * (pSwapGroup->zombie is TRUE).
3002 */
3003 pSwapGroup = nvHsGetSwapGroupStruct(&pOpenDev->swapGroupHandles,
3004 handle);
3005 if (pSwapGroup == NULL) {
3006 return FALSE;
3007 }
3008
3009 nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle);
3010
3011 nvHsDecrementSwapGroupRefCnt(pSwapGroup);
3012
3013 return TRUE;
3014 }
3015
3016 /*!
3017 * Change the value of the specified attribute.
3018 */
SetDpyAttribute(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3019 static NvBool SetDpyAttribute(struct NvKmsPerOpen *pOpen,
3020 void *pParamsVoid)
3021 {
3022 struct NvKmsSetDpyAttributeParams *pParams = pParamsVoid;
3023 NVDpyEvoPtr pDpyEvo;
3024
3025 pDpyEvo = GetPerOpenDpy(pOpen,
3026 pParams->request.deviceHandle,
3027 pParams->request.dispHandle,
3028 pParams->request.dpyId);
3029 if (pDpyEvo == NULL) {
3030 return FALSE;
3031 }
3032
3033 return nvSetDpyAttributeEvo(pDpyEvo, pParams);
3034 }
3035
3036
3037 /*!
3038 * Get the value of the specified attribute.
3039 */
GetDpyAttribute(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3040 static NvBool GetDpyAttribute(struct NvKmsPerOpen *pOpen,
3041 void *pParamsVoid)
3042 {
3043 struct NvKmsGetDpyAttributeParams *pParams = pParamsVoid;
3044 NVDpyEvoPtr pDpyEvo;
3045
3046 pDpyEvo = GetPerOpenDpy(pOpen,
3047 pParams->request.deviceHandle,
3048 pParams->request.dispHandle,
3049 pParams->request.dpyId);
3050 if (pDpyEvo == NULL) {
3051 return FALSE;
3052 }
3053
3054 return nvGetDpyAttributeEvo(pDpyEvo, pParams);
3055 }
3056
3057
3058 /*!
3059 * Get the valid values of the specified attribute.
3060 */
GetDpyAttributeValidValues(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3061 static NvBool GetDpyAttributeValidValues(struct NvKmsPerOpen *pOpen,
3062 void *pParamsVoid)
3063 {
3064 struct NvKmsGetDpyAttributeValidValuesParams *pParams = pParamsVoid;
3065 NVDpyEvoPtr pDpyEvo;
3066
3067 pDpyEvo = GetPerOpenDpy(pOpen,
3068 pParams->request.deviceHandle,
3069 pParams->request.dispHandle,
3070 pParams->request.dpyId);
3071 if (pDpyEvo == NULL) {
3072 return FALSE;
3073 }
3074
3075 return nvGetDpyAttributeValidValuesEvo(pDpyEvo, pParams);
3076 }
3077
3078
3079 /*!
3080 * Set the value of the specified attribute.
3081 */
SetDispAttribute(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3082 static NvBool SetDispAttribute(struct NvKmsPerOpen *pOpen,
3083 void *pParamsVoid)
3084 {
3085 struct NvKmsSetDispAttributeParams *pParams = pParamsVoid;
3086 struct NvKmsPerOpenDisp *pOpenDisp;
3087
3088 pOpenDisp = GetPerOpenDisp(pOpen,
3089 pParams->request.deviceHandle,
3090 pParams->request.dispHandle);
3091 if (pOpenDisp == NULL) {
3092 return FALSE;
3093 }
3094
3095 return nvSetDispAttributeEvo(pOpenDisp->pDispEvo, pParams);
3096 }
3097
3098
3099 /*!
3100 * Get the value of the specified attribute.
3101 */
GetDispAttribute(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3102 static NvBool GetDispAttribute(struct NvKmsPerOpen *pOpen,
3103 void *pParamsVoid)
3104 {
3105 struct NvKmsGetDispAttributeParams *pParams = pParamsVoid;
3106 struct NvKmsPerOpenDisp *pOpenDisp;
3107
3108 pOpenDisp = GetPerOpenDisp(pOpen,
3109 pParams->request.deviceHandle,
3110 pParams->request.dispHandle);
3111 if (pOpenDisp == NULL) {
3112 return FALSE;
3113 }
3114
3115 return nvGetDispAttributeEvo(pOpenDisp->pDispEvo, pParams);
3116 }
3117
3118
3119 /*!
3120 * Get the valid values of the specified attribute.
3121 */
GetDispAttributeValidValues(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3122 static NvBool GetDispAttributeValidValues(struct NvKmsPerOpen *pOpen,
3123 void *pParamsVoid)
3124 {
3125 struct NvKmsGetDispAttributeValidValuesParams *pParams = pParamsVoid;
3126
3127 struct NvKmsPerOpenDisp *pOpenDisp;
3128
3129 pOpenDisp = GetPerOpenDisp(pOpen,
3130 pParams->request.deviceHandle,
3131 pParams->request.dispHandle);
3132 if (pOpenDisp == NULL) {
3133 return FALSE;
3134 }
3135
3136 return nvGetDispAttributeValidValuesEvo(pOpenDisp->pDispEvo, pParams);
3137 }
3138
3139
3140 /*!
3141 * Get information about the specified framelock device.
3142 */
QueryFrameLock(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3143 static NvBool QueryFrameLock(struct NvKmsPerOpen *pOpen,
3144 void *pParamsVoid)
3145 {
3146 struct NvKmsQueryFrameLockParams *pParams = pParamsVoid;
3147 struct NvKmsPerOpenFrameLock *pOpenFrameLock;
3148 const NVFrameLockEvoRec *pFrameLockEvo;
3149 NvU32 gpu;
3150
3151 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
3152
3153 pOpenFrameLock =
3154 GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle);
3155
3156 if (pOpenFrameLock == NULL) {
3157 return FALSE;
3158 }
3159
3160 pFrameLockEvo = pOpenFrameLock->pFrameLockEvo;
3161
3162 ct_assert(ARRAY_LEN(pFrameLockEvo->gpuIds) <=
3163 ARRAY_LEN(pParams->reply.gpuIds));
3164
3165 for (gpu = 0; gpu < pFrameLockEvo->nGpuIds; gpu++) {
3166 pParams->reply.gpuIds[gpu] = pFrameLockEvo->gpuIds[gpu];
3167 }
3168
3169 return TRUE;
3170 }
3171
3172
SetFrameLockAttribute(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3173 static NvBool SetFrameLockAttribute(struct NvKmsPerOpen *pOpen,
3174 void *pParamsVoid)
3175 {
3176 struct NvKmsSetFrameLockAttributeParams *pParams = pParamsVoid;
3177 struct NvKmsPerOpenFrameLock *pOpenFrameLock;
3178 NVFrameLockEvoRec *pFrameLockEvo;
3179
3180 pOpenFrameLock =
3181 GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle);
3182
3183 if (pOpenFrameLock == NULL) {
3184 return FALSE;
3185 }
3186
3187 pFrameLockEvo = pOpenFrameLock->pFrameLockEvo;
3188
3189 return nvSetFrameLockAttributeEvo(pFrameLockEvo, pParams);
3190 }
3191
3192
GetFrameLockAttribute(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3193 static NvBool GetFrameLockAttribute(struct NvKmsPerOpen *pOpen,
3194 void *pParamsVoid)
3195 {
3196 struct NvKmsGetFrameLockAttributeParams *pParams = pParamsVoid;
3197 struct NvKmsPerOpenFrameLock *pOpenFrameLock;
3198 const NVFrameLockEvoRec *pFrameLockEvo;
3199
3200 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
3201
3202 pOpenFrameLock =
3203 GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle);
3204
3205 if (pOpenFrameLock == NULL) {
3206 return FALSE;
3207 }
3208
3209 pFrameLockEvo = pOpenFrameLock->pFrameLockEvo;
3210
3211 return nvGetFrameLockAttributeEvo(pFrameLockEvo, pParams);
3212 }
3213
3214
GetFrameLockAttributeValidValues(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3215 static NvBool GetFrameLockAttributeValidValues(struct NvKmsPerOpen *pOpen,
3216 void *pParamsVoid)
3217 {
3218 struct NvKmsGetFrameLockAttributeValidValuesParams *pParams = pParamsVoid;
3219 struct NvKmsPerOpenFrameLock *pOpenFrameLock;
3220 const NVFrameLockEvoRec *pFrameLockEvo;
3221
3222 nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
3223
3224 pOpenFrameLock =
3225 GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle);
3226
3227 if (pOpenFrameLock == NULL) {
3228 return FALSE;
3229 }
3230
3231 pFrameLockEvo = pOpenFrameLock->pFrameLockEvo;
3232
3233 return nvGetFrameLockAttributeValidValuesEvo(pFrameLockEvo, pParams);
3234 }
3235
3236
3237 /*!
3238 * Pop the next event off of the client's event queue.
3239 */
static NvBool GetNextEvent(struct NvKmsPerOpen *pOpen,
                           void *pParamsVoid)
{
    struct NvKmsGetNextEventParams *pParams = pParamsVoid;
    struct NvKmsPerOpenEventListEntry *pEntry;

    /* Only ioctl-type opens maintain an event queue. */
    nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);

    /* An empty queue is not an error: report reply.valid = FALSE. */
    if (nvListIsEmpty(&pOpen->ioctl.eventList)) {
        pParams->reply.valid = FALSE;
        return TRUE;
    }

    pEntry = nvListFirstEntry(&pOpen->ioctl.eventList,
                              struct NvKmsPerOpenEventListEntry,
                              eventListEntry);

    /* Copy the event out before freeing its list entry. */
    pParams->reply.valid = TRUE;
    pParams->reply.event = pEntry->event;

    nvListDel(&pEntry->eventListEntry);

    nvFree(pEntry);

    /*
     * If this pop drained the queue, tell the kernel layer so it can
     * clear any "data available" poll/select state for this open.
     */
    if (nvListIsEmpty(&pOpen->ioctl.eventList)) {
        nvkms_event_queue_changed(pOpen->pOpenKernel, FALSE);
    }

    return TRUE;
}
3270
3271
3272 /*!
3273 * Record the client's event interest for the specified device.
3274 */
DeclareEventInterest(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3275 static NvBool DeclareEventInterest(struct NvKmsPerOpen *pOpen,
3276 void *pParamsVoid)
3277 {
3278 struct NvKmsDeclareEventInterestParams *pParams = pParamsVoid;
3279
3280 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
3281
3282 pOpen->ioctl.eventInterestMask = pParams->request.interestMask;
3283
3284 return TRUE;
3285 }
3286
ClearUnicastEvent(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3287 static NvBool ClearUnicastEvent(struct NvKmsPerOpen *pOpen,
3288 void *pParamsVoid)
3289 {
3290 struct NvKmsClearUnicastEventParams *pParams = pParamsVoid;
3291 struct NvKmsPerOpen *pOpenFd = NULL;
3292
3293 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
3294
3295 pOpenFd = nvkms_get_per_open_data(pParams->request.unicastEventFd);
3296
3297 if (pOpenFd == NULL) {
3298 return FALSE;
3299 }
3300
3301 if (pOpenFd->type != NvKmsPerOpenTypeUnicastEvent) {
3302 return FALSE;
3303 }
3304
3305 nvkms_event_queue_changed(pOpenFd->pOpenKernel, FALSE);
3306
3307 return TRUE;
3308 }
3309
SetLayerPosition(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3310 static NvBool SetLayerPosition(struct NvKmsPerOpen *pOpen,
3311 void *pParamsVoid)
3312 {
3313 struct NvKmsSetLayerPositionParams *pParams = pParamsVoid;
3314 struct NvKmsPerOpenDev *pOpenDev;
3315
3316 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
3317
3318 if (pOpenDev == NULL) {
3319 return FALSE;
3320 }
3321
3322 /* XXX NVKMS HEADSURFACE TODO: intercept */
3323
3324 return nvLayerSetPositionEvo(pOpenDev->pDevEvo, &pParams->request);
3325 }
3326
GrabOwnership(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3327 static NvBool GrabOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
3328 {
3329 struct NvKmsGrabOwnershipParams *pParams = pParamsVoid;
3330 struct NvKmsPerOpenDev *pOpenDev;
3331
3332 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
3333
3334 if (pOpenDev == NULL) {
3335 return FALSE;
3336 }
3337
3338 // The only kind of ownership right now is modeset ownership.
3339 return GrabModesetOwnership(pOpenDev);
3340 }
3341
ReleaseOwnership(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3342 static NvBool ReleaseOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
3343 {
3344 struct NvKmsReleaseOwnershipParams *pParams = pParamsVoid;
3345 struct NvKmsPerOpenDev *pOpenDev;
3346
3347 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
3348
3349 if (pOpenDev == NULL) {
3350 return FALSE;
3351 }
3352
3353 // The only kind of ownership right now is modeset ownership.
3354 return ReleaseModesetOwnership(pOpenDev);
3355 }
3356
/*!
 * Stash a set of permissions on the fd named in the request, so another
 * client can later acquire them through AcquirePermissions().
 *
 * The target fd is converted to an NvKmsPerOpenTypeGrantPermissions open
 * and records both the permissions and the device they apply to.
 */
static NvBool GrantPermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsGrantPermissionsParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;
    struct NvKmsPerOpen *pOpenFd;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    /* Only a modeset owner can grant permissions. */

    if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
        return FALSE;
    }

    /* Reject malformed permission contents before touching the fd. */
    if (!ValidateNvKmsPermissions(pOpenDev->pDevEvo,
                                  &pParams->request.permissions,
                                  pOpen->clientType)) {
        return FALSE;
    }

    pOpenFd = nvkms_get_per_open_data(pParams->request.fd);

    if (pOpenFd == NULL) {
        return FALSE;
    }

    /* Convert the target fd's open to the grant-permissions type. */
    if (!AssignNvKmsPerOpenType(
            pOpenFd, NvKmsPerOpenTypeGrantPermissions, FALSE)) {
        return FALSE;
    }

    pOpenFd->grantPermissions.permissions = pParams->request.permissions;

    pOpenFd->grantPermissions.pDevEvo = pOpenDev->pDevEvo;

    return TRUE;
}
3398
/*!
 * Acquire permissions previously granted on the fd named in the request,
 * merging them into the calling client's per-open device state.
 *
 * Flip permissions OR layer masks together; modeset permissions union the
 * dpy id lists; sub-owner permissions make this open the device's single
 * sub-owner and assign it full permissions.
 */
static NvBool AcquirePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsAcquirePermissionsParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;
    struct NvKmsPerOpen *pOpenFd;
    const struct NvKmsPermissions *pPermissionsNew;
    enum NvKmsPermissionsType type;

    pOpenFd = nvkms_get_per_open_data(pParams->request.fd);

    if (pOpenFd == NULL) {
        return FALSE;
    }

    /* The fd must be one that GrantPermissions() previously set up. */
    if (pOpenFd->type != NvKmsPerOpenTypeGrantPermissions) {
        return FALSE;
    }

    /* The caller must have the granted device open. */
    pOpenDev = DevEvoToOpenDev(pOpen, pOpenFd->grantPermissions.pDevEvo);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    type = pOpenFd->grantPermissions.permissions.type;

    pPermissionsNew = &pOpenFd->grantPermissions.permissions;

    if (type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) {
        NvU32 d, h;

        /* Merge the granted layer masks into the caller's existing ones. */
        for (d = 0; d < ARRAY_LEN(pOpenDev->flipPermissions.disp); d++) {
            for (h = 0; h < ARRAY_LEN(pOpenDev->flipPermissions.
                                      disp[d].head); h++) {
                pOpenDev->flipPermissions.disp[d].head[h].layerMask |=
                    pPermissionsNew->flip.disp[d].head[h].layerMask;
            }
        }

        pParams->reply.permissions.flip = pOpenDev->flipPermissions;

    } else if (type == NV_KMS_PERMISSIONS_TYPE_MODESET) {
        NvU32 d, h;

        /* Union the granted dpy lists into the caller's existing ones. */
        for (d = 0; d < ARRAY_LEN(pOpenDev->modesetPermissions.disp); d++) {
            for (h = 0; h < ARRAY_LEN(pOpenDev->modesetPermissions.
                                      disp[d].head); h++) {
                pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList =
                    nvAddDpyIdListToDpyIdList(
                        pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList,
                        pPermissionsNew->modeset.disp[d].head[h].dpyIdList);
            }
        }

        pParams->reply.permissions.modeset = pOpenDev->modesetPermissions;

    } else if (type == NV_KMS_PERMISSIONS_TYPE_SUB_OWNER) {

        if (pOpenDev->pDevEvo->modesetSubOwner != NULL) {
            /* There can be only one sub-owner */
            return FALSE;
        }

        pOpenDev->pDevEvo->modesetSubOwner = pOpenDev;
        AssignFullNvKmsPermissions(pOpenDev);

    } else {
        /*
         * GrantPermissions() should ensure that
         * pOpenFd->grantPermissions.permissions.type is always valid.
         */
        nvAssert(!"AcquirePermissions validation failure");
        return FALSE;
    }

    pParams->reply.permissions.type = type;
    pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle;

    return TRUE;
}
3479
3480 /*!
3481 * Clear the set of permissions from pRevokingOpenDev.
3482 *
3483 * For NvKmsPerOpen::type==Ioctl, clear from permissions. It doesn't clear
3484 * itself or privileged.
3485 *
3486 * For NvKmsPerOpen::type==GrantPermissions, clear from
3487 * NvKmsPerOpen::grantPermissions, and reset NvKmsPerOpen::type to Undefined
3488 * if it is empty.
3489 */
static NvBool RevokePermissionsSet(
    struct NvKmsPerOpenDev *pRevokingOpenDev,
    const struct NvKmsPermissions *pRevokingPermissions)
{
    const NVDevEvoRec *pDevEvo;
    struct NvKmsPerOpen *pOpen;
    const struct NvKmsFlipPermissions *pRemoveFlip;
    const struct NvKmsModesetPermissions *pRemoveModeset;

    // Only process valid permissions.
    if (pRevokingPermissions->type != NV_KMS_PERMISSIONS_TYPE_FLIPPING &&
        pRevokingPermissions->type != NV_KMS_PERMISSIONS_TYPE_MODESET) {
        return FALSE;
    }

    pDevEvo = pRevokingOpenDev->pDevEvo;
    /* Exactly one of pRemoveFlip/pRemoveModeset is non-NULL, per type. */
    pRemoveFlip =
        (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING)
            ? &pRevokingPermissions->flip
            : NULL;
    pRemoveModeset =
        (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET)
            ? &pRevokingPermissions->modeset
            : NULL;

    /* Walk every NVKMS open in the system and strip the permissions. */
    nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) {
        if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) &&
            (pOpen->grantPermissions.pDevEvo == pDevEvo)) {
            NvBool remainingPermissions = FALSE;
            struct NvKmsPermissions *pFdPermissions =
                &pOpen->grantPermissions.permissions;

            if (pFdPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) {
                remainingPermissions =
                    RemoveFlipPermissions(&pFdPermissions->flip, pRemoveFlip);
            } else {
                remainingPermissions = RemoveModesetPermissions(
                    &pFdPermissions->modeset, pRemoveModeset);
            }

            // Reset if it is empty.
            if (!remainingPermissions) {
                nvkms_memset(&pOpen->grantPermissions, 0,
                             sizeof(pOpen->grantPermissions));
                pOpen->type = NvKmsPerOpenTypeUndefined;
            }

        } else if (pOpen->type == NvKmsPerOpenTypeIoctl) {

            struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);
            if (pOpenDev == NULL) {
                continue;
            }

            /* Never strip the revoker itself or privileged opens. */
            if (pOpenDev == pRevokingOpenDev || pOpenDev->isPrivileged) {
                continue;
            }

            if (pRevokingPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) {
                RemoveFlipPermissions(&pOpenDev->flipPermissions, pRemoveFlip);
            } else {
                RemoveModesetPermissions(&pOpenDev->modesetPermissions,
                                         pRemoveModeset);
            }
        }
    }

    return TRUE;
}
3559
IsHeadRevoked(const NVDispEvoRec * pDispEvo,const NvU32 apiHead,void * pData)3560 static NvBool IsHeadRevoked(const NVDispEvoRec *pDispEvo,
3561 const NvU32 apiHead,
3562 void *pData)
3563 {
3564 const struct NvKmsPermissions *pPermissions = pData;
3565
3566 return !nvDpyIdListIsEmpty(
3567 pPermissions->modeset.disp[pDispEvo->displayOwner].head[apiHead].dpyIdList);
3568 }
3569
/*!
 * Revoke permissions from other clients, either wholesale by type bitmask
 * (legacy behavior) or as a specific permissions set.
 */
static NvBool RevokePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid)
{
    struct NvKmsRevokePermissionsParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev =
        GetPerOpenDev(pOpen, pParams->request.deviceHandle);
    const NvU32 validBitmask =
        NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) |
        NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET) |
        NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    /* Reject invalid bitmasks. */

    if ((pParams->request.permissionsTypeBitmask & ~validBitmask) != 0) {
        return FALSE;
    }

    if ((pParams->request.permissionsTypeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_SUB_OWNER)) != 0) {
        if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) {
            /* Only the modeset owner can revoke sub-owner permissions. */
            return FALSE;
        }

        /*
         * When revoking ownership permissions, shut down all heads.
         *
         * This is necessary to keep the state of nvidia-drm in sync with NVKMS.
         * Otherwise, an NVKMS client can leave heads enabled when handing off
         * control of the device back to nvidia-drm, and nvidia-drm's flip queue
         * handling will get out of sync because it thinks all heads are
         * disabled and does not expect flip events on those heads.
         */
        nvShutDownApiHeads(pOpenDev->pDevEvo, pOpenDev, NULL /* pTestFunc */,
                           NULL /* pData */,
                           TRUE /* doRasterLock */);
    }

    /*
     * Only a client with sub-owner permissions (or better) can revoke other
     * kinds of permissions.
     */
    if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
        return FALSE;
    }

    if (pParams->request.permissionsTypeBitmask > 0) {
        // Old behavior, revoke all permissions of a type.

        /* Revoke permissions for everyone except the caller. */
        RevokePermissionsInternal(pParams->request.permissionsTypeBitmask,
                                  pOpenDev->pDevEvo,
                                  pOpenDev /* pOpenDevExclude */);
    } else {
        /* If not using bitmask, revoke using the set. */
        if (!RevokePermissionsSet(pOpenDev, &pParams->request.permissions)) {
            return FALSE;
        }

        /*
         * When revoking ownership permissions, shut down those heads.
         *
         * This is necessary to keep the state of nvidia-drm in sync with NVKMS.
         * Otherwise, an NVKMS client can leave heads enabled when handing off
         * control of the device back to nvidia-drm, which prevents them from
         * being able to be leased again.
         */
        if (pParams->request.permissions.type == NV_KMS_PERMISSIONS_TYPE_MODESET) {
            nvShutDownApiHeads(pOpenDev->pDevEvo, pOpenDev, IsHeadRevoked,
                               &pParams->request.permissions,
                               TRUE /* doRasterLock */);
        }
    }

    return TRUE;
}
3648
/*!
 * Register the surface named in the request as a deferred request fifo and
 * return a handle to it in the reply.
 */
static NvBool RegisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen,
                                          void *pParamsVoid)
{
    struct NvKmsRegisterDeferredRequestFifoParams *pParams = pParamsVoid;
    struct NvKmsPerOpenDev *pOpenDev;
    NVSurfaceEvoPtr pSurfaceEvo;
    NVDeferredRequestFifoRec *pDeferredRequestFifo;
    NvKmsDeferredRequestFifoHandle handle;

    pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);

    if (pOpenDev == NULL) {
        return FALSE;
    }

    pSurfaceEvo = nvEvoGetSurfaceFromHandleNoDispHWAccessOk(
        pOpenDev->pDevEvo,
        &pOpenDev->surfaceHandles,
        pParams->request.surfaceHandle);

    if (pSurfaceEvo == NULL) {
        return FALSE;
    }

    /*
     * WAR Bug 2050970: If a surface is unregistered and it wasn't registered
     * with NvKmsRegisterSurfaceRequest::noDisplayHardwareAccess, then the call
     * to nvRMSyncEvoChannel() in nvEvoDecrementSurfaceRefCnts() may hang
     * if any flips in flight acquire on semaphore releases that haven't
     * occurred yet.
     *
     * Since a ctxdma is not necessary for the deferred request fifo surface,
     * we work around this by forcing all surfaces that will be registered as
     * a deferred request fifo to be registered with
     * noDisplayHardwareAccess==TRUE, then skip the idle in
     * nvEvoDecrementSurfaceRefCnts() for these surfaces.
     */
    if (pSurfaceEvo->requireDisplayHardwareAccess) {
        return FALSE;
    }

    pDeferredRequestFifo =
        nvEvoRegisterDeferredRequestFifo(pOpenDev->pDevEvo, pSurfaceEvo);

    if (pDeferredRequestFifo == NULL) {
        return FALSE;
    }

    handle = nvEvoCreateApiHandle(&pOpenDev->deferredRequestFifoHandles,
                                  pDeferredRequestFifo);

    /* Roll back the fifo registration if handle creation failed. */
    if (handle == 0) {
        nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo,
                                           pDeferredRequestFifo);
        return FALSE;
    }

    pParams->reply.deferredRequestFifoHandle = handle;

    return TRUE;
}
3710
UnregisterDeferredRequestFifo(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3711 static NvBool UnregisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen,
3712 void *pParamsVoid)
3713 {
3714 struct NvKmsUnregisterDeferredRequestFifoParams *pParams = pParamsVoid;
3715 NvKmsDeferredRequestFifoHandle handle =
3716 pParams->request.deferredRequestFifoHandle;
3717 NVDeferredRequestFifoRec *pDeferredRequestFifo;
3718 struct NvKmsPerOpenDev *pOpenDev =
3719 GetPerOpenDev(pOpen, pParams->request.deviceHandle);
3720
3721 if (pOpenDev == NULL) {
3722 return FALSE;
3723 }
3724
3725 pDeferredRequestFifo =
3726 nvEvoGetPointerFromApiHandle(
3727 &pOpenDev->deferredRequestFifoHandles, handle);
3728
3729 if (pDeferredRequestFifo == NULL) {
3730 return FALSE;
3731 }
3732
3733 nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle);
3734
3735 nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, pDeferredRequestFifo);
3736
3737 return TRUE;
3738 }
3739
3740 /*!
3741 * Get the CRC32 data for the specified dpy.
3742 */
QueryDpyCRC32(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3743 static NvBool QueryDpyCRC32(struct NvKmsPerOpen *pOpen,
3744 void *pParamsVoid)
3745 {
3746 struct NvKmsQueryDpyCRC32Params *pParams = pParamsVoid;
3747 struct NvKmsPerOpenDev *pOpenDev;
3748 struct NvKmsPerOpenDisp *pOpenDisp;
3749 NVDispEvoPtr pDispEvo;
3750 CRC32NotifierCrcOut crcOut;
3751
3752 if (!GetPerOpenDevAndDisp(pOpen,
3753 pParams->request.deviceHandle,
3754 pParams->request.dispHandle,
3755 &pOpenDev,
3756 &pOpenDisp)) {
3757 return FALSE;
3758 }
3759
3760 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
3761 // Only a current owner can query CRC32 values.
3762 return FALSE;
3763 }
3764
3765 pDispEvo = pOpenDisp->pDispEvo;
3766
3767 if (!nvApiHeadIsActive(pDispEvo, pParams->request.head)) {
3768 return FALSE;
3769 }
3770
3771 nvkms_memset(&(pParams->reply), 0, sizeof(pParams->reply));
3772
3773 // Since will only read 1 frame of CRCs, point to single reply struct vals
3774 crcOut.rasterGeneratorCrc32 = &(pParams->reply.rasterGeneratorCrc32);
3775 crcOut.compositorCrc32 = &(pParams->reply.compositorCrc32);
3776 crcOut.outputCrc32 = &(pParams->reply.outputCrc32);
3777
3778 {
3779 /*
3780 * XXX[2Heads1OR] Is it sufficient to query CRC only for the primary
3781 * hardware head?
3782 */
3783 NvU32 head = nvGetPrimaryHwHead(pDispEvo, pParams->request.head);
3784
3785 nvAssert(head != NV_INVALID_HEAD);
3786
3787 if (!nvReadCRC32Evo(pDispEvo, head, &crcOut)) {
3788 return FALSE;
3789 }
3790 }
3791
3792 return TRUE;
3793 }
3794
AllocSwapGroup(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3795 static NvBool AllocSwapGroup(
3796 struct NvKmsPerOpen *pOpen,
3797 void *pParamsVoid)
3798 {
3799 struct NvKmsAllocSwapGroupParams *pParams = pParamsVoid;
3800 struct NvKmsPerOpenDev *pOpenDev;
3801 NVSwapGroupRec *pSwapGroup;
3802 NvKmsSwapGroupHandle handle;
3803
3804 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
3805
3806 if (pOpenDev == NULL) {
3807 return FALSE;
3808 }
3809
3810 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
3811 return FALSE;
3812 }
3813
3814 pSwapGroup = nvHsAllocSwapGroup(pOpenDev->pDevEvo, &pParams->request);
3815
3816 if (pSwapGroup == NULL) {
3817 return FALSE;
3818 }
3819
3820 handle = nvEvoCreateApiHandle(&pOpenDev->swapGroupHandles, pSwapGroup);
3821
3822 if (handle == 0) {
3823 nvHsFreeSwapGroup(pOpenDev->pDevEvo, pSwapGroup);
3824 return FALSE;
3825 }
3826
3827 pParams->reply.swapGroupHandle = handle;
3828
3829 return TRUE;
3830 }
3831
FreeSwapGroup(struct NvKmsPerOpen * pOpen,void * pParamsVoid)3832 static NvBool FreeSwapGroup(
3833 struct NvKmsPerOpen *pOpen,
3834 void *pParamsVoid)
3835 {
3836 struct NvKmsFreeSwapGroupParams *pParams = pParamsVoid;
3837 struct NvKmsPerOpenDev *pOpenDev;
3838 NVSwapGroupRec *pSwapGroup;
3839 NvKmsSwapGroupHandle handle = pParams->request.swapGroupHandle;
3840
3841 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
3842
3843 if (pOpenDev == NULL) {
3844 return FALSE;
3845 }
3846
3847 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
3848 return FALSE;
3849 }
3850
3851 pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles,
3852 handle);
3853 if (pSwapGroup == NULL) {
3854 return FALSE;
3855 }
3856
3857 nvEvoDestroyApiHandle(&pOpenDev->swapGroupHandles, handle);
3858
3859 nvHsFreeSwapGroup(pOpenDev->pDevEvo, pSwapGroup);
3860
3861 return TRUE;
3862 }
3863
/*!
 * Join one or more deferred request fifos to swap groups, atomically:
 * every member's handles are validated before any state is committed, and
 * the join either succeeds for all members or fails for all of them.
 */
static NvBool JoinSwapGroup(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsJoinSwapGroupParams *pParams = pParamsVoid;
    const struct NvKmsJoinSwapGroupRequestOneMember *pMember =
        pParams->request.member;
    NvU32 i;
    NvBool anySwapGroupsPending = FALSE;
    NVHsJoinSwapGroupWorkArea *pJoinSwapGroupWorkArea;

    /* Reject an empty or oversized member list. */
    if ((pParams->request.numMembers == 0) ||
        (pParams->request.numMembers >
         ARRAY_LEN(pParams->request.member))) {
        return FALSE;
    }

    pJoinSwapGroupWorkArea = nvCalloc(pParams->request.numMembers,
                                      sizeof(NVHsJoinSwapGroupWorkArea));

    if (!pJoinSwapGroupWorkArea) {
        return FALSE;
    }

    /*
     * When a client is joining multiple swap groups simultaneously, all of its
     * deferred request fifos must enter the pendingJoined state if any of the
     * swap groups it's joining have pending flips. Otherwise, this sequence
     * can lead to a deadlock:
     *
     * - Client 0 joins DRF 0 to SG 0, DRF 1 to SG 1, with SG 0 and SG 1
     *   fliplocked
     * - Client 0 submits DRF 0 ready, SG 0 flips, but the flip won't complete
     *   and [Client 0.DRF 0] won't be released until SG 1 flips due to
     *   fliplock
     * - Client 1 joins DRF 0 to SG 0, DRF 1 to SG 1
     * - Client 0 submits DRF 1 ready, but SG 1 doesn't flip because
     *   [Client 1.DRF 0] has joined.
     *
     * With the pendingJoined behavior, this sequence works as follows:
     *
     * - Client 0 joins DRF 0 to SG 0, DRF 1 to SG 1, with SG 0 and SG 1
     *   fliplocked
     * - Client 0 submits DRF 0 ready, SG 0 flips, but the flip won't complete
     *   and [Client 0.DRF 0] won't be released until SG 1 flips due to
     *   fliplock
     * - Client 1 joins DRF 0 to SG 0, DRF 1 to SG 1, but both enter the
     *   pendingJoined state because [Client 0.DRF 0] has a pending flip.
     * - Client 0 submits DRF 1 ready, both swap groups flip, Client 0's
     *   DRFs are both released, and Client 1's DRFs both leave the
     *   pendingJoined state.
     */
    /* Pass 1: validate every member and stage its state in the work area. */
    for (i = 0; i < pParams->request.numMembers; i++) {
        struct NvKmsPerOpenDev *pOpenDev;
        NVSwapGroupRec *pSwapGroup;
        NVDeferredRequestFifoRec *pDeferredRequestFifo;
        struct NvKmsPerOpen *pEventOpenFd = NULL;
        NvKmsDeviceHandle deviceHandle = pMember[i].deviceHandle;
        NvKmsSwapGroupHandle swapGroupHandle = pMember[i].swapGroupHandle;
        NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle =
            pMember[i].deferredRequestFifoHandle;

        pOpenDev = GetPerOpenDev(pOpen, deviceHandle);

        if (pOpenDev == NULL) {
            goto fail;
        }

        pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles,
                                      swapGroupHandle);

        if (pSwapGroup == NULL) {
            goto fail;
        }

        if (pSwapGroup->pendingFlip) {
            anySwapGroupsPending = TRUE;
        }

        /*
         * In addition to the check for pending swap groups above, validate
         * the remainder of the request now.
         */

        /*
         * Prevent pSwapGroup->nMembers from overflowing NV_U32_MAX.
         *
         * Ideally we would want to count how many members are being added to
         * each swap group in the request, but as an optimization, just verify
         * that the number of {fifo, swapgroup} tuples joining would not
         * overflow any swapgroup even if every one was joining the same
         * swapgroup.
         */
        if (NV_U32_MAX - pSwapGroup->nMembers < pParams->request.numMembers) {
            goto fail;
        }

        pDeferredRequestFifo =
            nvEvoGetPointerFromApiHandle(
                &pOpenDev->deferredRequestFifoHandles,
                deferredRequestFifoHandle);

        if (pDeferredRequestFifo == NULL) {
            goto fail;
        }

        /*
         * If the pDeferredRequestFifo is already a member of a SwapGroup, then
         * fail.
         */
        if (pDeferredRequestFifo->swapGroup.pSwapGroup != NULL) {
            goto fail;
        }

        /* Optionally resolve and validate a unicast-event fd for this member. */
        if (pMember[i].unicastEvent.specified) {
            pEventOpenFd = nvkms_get_per_open_data(pMember[i].unicastEvent.fd);

            if (pEventOpenFd == NULL) {
                goto fail;
            }

            if (!PerOpenIsValidForUnicastEvent(pEventOpenFd)) {
                goto fail;
            }
        }

        pJoinSwapGroupWorkArea[i].pDevEvo = pOpenDev->pDevEvo;
        pJoinSwapGroupWorkArea[i].pSwapGroup = pSwapGroup;
        pJoinSwapGroupWorkArea[i].pDeferredRequestFifo = pDeferredRequestFifo;
        pJoinSwapGroupWorkArea[i].pEventOpenFd = pEventOpenFd;
        pJoinSwapGroupWorkArea[i].enabledHeadSurface = FALSE;
    }

    /* Commit the join through the headsurface layer. */
    if (!nvHsJoinSwapGroup(pJoinSwapGroupWorkArea,
                           pParams->request.numMembers,
                           anySwapGroupsPending)) {
        goto fail;
    }

    /* Beyond this point, the function cannot fail. */

    /* Pass 2: wire up any unicast-event fds now that the join is committed. */
    for (i = 0; i < pParams->request.numMembers; i++) {
        struct NvKmsPerOpen *pEventOpenFd =
            pJoinSwapGroupWorkArea[i].pEventOpenFd;
        NVDeferredRequestFifoRec *pDeferredRequestFifo =
            pJoinSwapGroupWorkArea[i].pDeferredRequestFifo;

        if (pEventOpenFd) {
            pDeferredRequestFifo->swapGroup.pOpenUnicastEvent = pEventOpenFd;

            pEventOpenFd->unicastEvent.type =
                NvKmsUnicastEventTypeDeferredRequest;
            pEventOpenFd->unicastEvent.e.deferred.pDeferredRequestFifo =
                pDeferredRequestFifo;

            pEventOpenFd->type = NvKmsPerOpenTypeUnicastEvent;
        }
    }

    nvFree(pJoinSwapGroupWorkArea);
    return TRUE;

fail:
    nvFree(pJoinSwapGroupWorkArea);
    return FALSE;
}
4030
/*!
 * Remove one or more deferred request fifos from their swap groups.
 *
 * Uses a validate-then-commit structure: all handles are checked in a
 * first pass, so the second (committing) pass cannot fail partway.
 */
static NvBool LeaveSwapGroup(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsLeaveSwapGroupParams *pParams = pParamsVoid;
    const struct NvKmsLeaveSwapGroupRequestOneMember *pMember =
        pParams->request.member;
    NvU32 i;

    /* Reject an empty or oversized member list. */
    if ((pParams->request.numMembers == 0) ||
        (pParams->request.numMembers >
         ARRAY_LEN(pParams->request.member))) {
        return FALSE;
    }

    /*
     * Validate all handles passed by the caller and fail if any are invalid.
     */
    for (i = 0; i < pParams->request.numMembers; i++) {
        struct NvKmsPerOpenDev *pOpenDev;
        NVDeferredRequestFifoRec *pDeferredRequestFifo;
        NvKmsDeviceHandle deviceHandle =
            pMember[i].deviceHandle;
        NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle =
            pMember[i].deferredRequestFifoHandle;

        pOpenDev = GetPerOpenDev(pOpen, deviceHandle);

        if (pOpenDev == NULL) {
            return FALSE;
        }

        pDeferredRequestFifo =
            nvEvoGetPointerFromApiHandle(
                &pOpenDev->deferredRequestFifoHandles,
                deferredRequestFifoHandle);

        if (pDeferredRequestFifo == NULL) {
            return FALSE;
        }

        /* The fifo must currently belong to some swap group. */
        if (pDeferredRequestFifo->swapGroup.pSwapGroup == NULL) {
            return FALSE;
        }
    }

    /* Beyond this point, the function cannot fail. */

    for (i = 0; i < pParams->request.numMembers; i++) {
        struct NvKmsPerOpenDev *pOpenDev;
        NVDeferredRequestFifoRec *pDeferredRequestFifo;
        NvKmsDeviceHandle deviceHandle =
            pMember[i].deviceHandle;
        NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle =
            pMember[i].deferredRequestFifoHandle;

        /* Lookups repeat here; the pass above proved they succeed. */
        pOpenDev = GetPerOpenDev(pOpen, deviceHandle);

        pDeferredRequestFifo =
            nvEvoGetPointerFromApiHandle(
                &pOpenDev->deferredRequestFifoHandles,
                deferredRequestFifoHandle);

        nvHsLeaveSwapGroup(pOpenDev->pDevEvo, pDeferredRequestFifo,
                           FALSE /* teardown */);
    }

    return TRUE;
}
4100
SetSwapGroupClipList(struct NvKmsPerOpen * pOpen,void * pParamsVoid)4101 static NvBool SetSwapGroupClipList(
4102 struct NvKmsPerOpen *pOpen,
4103 void *pParamsVoid)
4104 {
4105 struct NvKmsSetSwapGroupClipListParams *pParams = pParamsVoid;
4106 struct NvKmsPerOpenDev *pOpenDev;
4107 NVSwapGroupRec *pSwapGroup;
4108 struct NvKmsRect *pClipList;
4109 NvBool ret;
4110
4111 pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle);
4112
4113 if (pOpenDev == NULL) {
4114 return FALSE;
4115 }
4116
4117 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
4118 return FALSE;
4119 }
4120
4121 pSwapGroup = nvHsGetSwapGroup(&pOpenDev->swapGroupHandles,
4122 pParams->request.swapGroupHandle);
4123
4124 if (pSwapGroup == NULL) {
4125 return FALSE;
4126 }
4127
4128 /*
4129 * Create a copy of the passed-in pClipList, to be stored in pSwapGroup.
4130 * Copy from the client using nvkms_copyin() or nvkms_memcpy(), depending on
4131 * the clientType.
4132 *
4133 * We do not use the nvKmsIoctl() prepUser/doneUser infrastructure here
4134 * because that would require creating two copies of pClipList in the
4135 * user-space client case: one allocated in prepUser and freed in doneUser,
4136 * and a second in nvHsSetSwapGroupClipList().
4137 */
4138 if (pParams->request.nClips == 0) {
4139 pClipList = NULL;
4140 } else {
4141 const size_t len = sizeof(struct NvKmsRect) * pParams->request.nClips;
4142
4143 if ((pParams->request.pClipList == 0) ||
4144 !nvKmsNvU64AddressIsSafe(pParams->request.pClipList)) {
4145 return FALSE;
4146 }
4147
4148 pClipList = nvAlloc(len);
4149
4150 if (pClipList == NULL) {
4151 return FALSE;
4152 }
4153
4154 if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) {
4155 int status =
4156 nvkms_copyin(pClipList, pParams->request.pClipList, len);
4157
4158 if (status != 0) {
4159 nvFree(pClipList);
4160 return FALSE;
4161 }
4162 } else {
4163 const void *pKernelPointer =
4164 nvKmsNvU64ToPointer(pParams->request.pClipList);
4165
4166 nvkms_memcpy(pClipList, pKernelPointer, len);
4167 }
4168 }
4169
4170 ret = nvHsSetSwapGroupClipList(
4171 pOpenDev->pDevEvo,
4172 pSwapGroup,
4173 pParams->request.nClips,
4174 pClipList);
4175
4176 if (!ret) {
4177 nvFree(pClipList);
4178 }
4179
4180 return ret;
4181 }
4182
SwitchMux(struct NvKmsPerOpen * pOpen,void * pParamsVoid)4183 static NvBool SwitchMux(
4184 struct NvKmsPerOpen *pOpen,
4185 void *pParamsVoid)
4186 {
4187 struct NvKmsSwitchMuxParams *pParams = pParamsVoid;
4188 const struct NvKmsSwitchMuxRequest *r = &pParams->request;
4189 NVDpyEvoPtr pDpyEvo;
4190
4191 pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId);
4192 if (pDpyEvo == NULL) {
4193 return FALSE;
4194 }
4195
4196 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(GetPerOpenDev(pOpen, r->deviceHandle))) {
4197 return FALSE;
4198 }
4199
4200 switch (pParams->request.operation) {
4201 case NVKMS_SWITCH_MUX_PRE:
4202 return nvRmMuxPre(pDpyEvo, r->state);
4203 case NVKMS_SWITCH_MUX:
4204 return nvRmMuxSwitch(pDpyEvo, r->state);
4205 case NVKMS_SWITCH_MUX_POST:
4206 return nvRmMuxPost(pDpyEvo, r->state);
4207 default:
4208 return FALSE;
4209 }
4210 }
4211
GetMuxState(struct NvKmsPerOpen * pOpen,void * pParamsVoid)4212 static NvBool GetMuxState(
4213 struct NvKmsPerOpen *pOpen,
4214 void *pParamsVoid)
4215 {
4216 struct NvKmsGetMuxStateParams *pParams = pParamsVoid;
4217 const struct NvKmsGetMuxStateRequest *r = &pParams->request;
4218 NVDpyEvoPtr pDpyEvo;
4219
4220 pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId);
4221 if (pDpyEvo == NULL) {
4222 return FALSE;
4223 }
4224
4225 pParams->reply.state = nvRmMuxState(pDpyEvo);
4226
4227 return pParams->reply.state != MUX_STATE_GET;
4228 }
4229
ExportVrrSemaphoreSurface(struct NvKmsPerOpen * pOpen,void * pParamsVoid)4230 static NvBool ExportVrrSemaphoreSurface(
4231 struct NvKmsPerOpen *pOpen,
4232 void *pParamsVoid)
4233 {
4234 struct NvKmsExportVrrSemaphoreSurfaceParams *pParams = pParamsVoid;
4235 const struct NvKmsExportVrrSemaphoreSurfaceRequest *req = &pParams->request;
4236 const struct NvKmsPerOpenDev *pOpenDev =
4237 GetPerOpenDev(pOpen, pParams->request.deviceHandle);
4238
4239 if (pOpenDev == NULL) {
4240 return FALSE;
4241 }
4242
4243 return nvExportVrrSemaphoreSurface(pOpenDev->pDevEvo, req->memFd);
4244 }
4245
/*!
 * Mark a vblank sync object as in use and, when possible, program the
 * hardware semaphore backing it.
 *
 * The object is always marked inUse; it is only marked enabled (and the HAL
 * is only invoked) when the api-head is active and the sync object has a
 * valid surface descriptor.  The staged work is accumulated in pUpdateState;
 * the caller is responsible for kicking it off.
 */
static void EnableAndSetupVblankSyncObject(NVDispEvoRec *pDispEvo,
                                           const NvU32 apiHead,
                                           NVVblankSyncObjectRec *pVblankSyncObject,
                                           NVEvoUpdateState *pUpdateState)
{
    /*
     * The core channel re-allocation code path may end up allocating
     * the fewer number of sync objects than the number of sync objects which
     * are allocated and in use by the NVKMS clients, hCtxDma = 0 if the
     * nvAllocCoreChannelEvo()-> InitApiHeadState()-> nvRmAllocCoreRGSyncpts()
     * code path fails to re-allocate that sync object.
     */
    if (nvApiHeadIsActive(pDispEvo, apiHead) &&
        (pVblankSyncObject->evoSyncpt.surfaceDesc.ctxDmaHandle != 0)) {
        NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead);

        nvAssert(head != NV_INVALID_HEAD);

        /* Stage the HW programming; rasterBlankStart.y selects the raster
         * line at which the semaphore is released. */
        pDispEvo->pDevEvo->hal->ConfigureVblankSyncObject(
                    pDispEvo->pDevEvo,
                    pDispEvo->headState[head].timings.rasterBlankStart.y,
                    head,
                    pVblankSyncObject->index,
                    &pVblankSyncObject->evoSyncpt.surfaceDesc,
                    pUpdateState);

        pVblankSyncObject->enabled = TRUE;
    }

    /* inUse is set even when the HW could not be programmed (inactive head
     * or missing ctxDma) so the object is not handed out again. */
    pVblankSyncObject->inUse = TRUE;
}
4277
/*!
 * Re-enable and re-program every vblank sync object tracked by every ioctl
 * client of the given device.
 *
 * Used after core channel re-allocation: walks all per-open handle tables,
 * stages the HW programming for each sync object into a single update state,
 * and kicks it off once at the end.
 */
static void EnableAndSetupVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo)
{
    /*
     * An NVEvoUpdateState has disp-scope, and we will only have
     * one disp when programming syncpts.
     */
    NVEvoUpdateState updateState = { };
    struct NvKmsPerOpen *pOpen;

    /* Nothing to do when the device/HAL has no vblank sync object support. */
    if (!pDevEvo->supportsSyncpts ||
        !pDevEvo->hal->caps.supportsVblankSyncObjects) {
        return;
    }

    /* If Syncpts are supported, we're on Orin, which only has one display. */
    nvAssert(pDevEvo->nDispEvo == 1);

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
        struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);
        struct NvKmsPerOpenDisp *pOpenDisp;
        NvKmsGenericHandle disp;

        /* Skip clients that have not opened this device. */
        if (pOpenDev == NULL) {
            continue;
        }

        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles,
                                            pOpenDisp, disp) {

            nvAssert(pOpenDisp->pDispEvo == pDevEvo->pDispEvo[0]);

            for (NvU32 apiHead = 0; apiHead <
                 ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) {
                NVEvoApiHandlesRec *pHandles =
                    &pOpenDisp->vblankSyncObjectHandles[apiHead];
                NVVblankSyncObjectRec *pVblankSyncObject;
                NvKmsVblankSyncObjectHandle handle;

                FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles,
                                                    pVblankSyncObject, handle) {
                    EnableAndSetupVblankSyncObject(pOpenDisp->pDispEvo, apiHead,
                                                   pVblankSyncObject,
                                                   &updateState);
                }
            }
        }
    }

    /* Kick off all staged programming in one update. */
    if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) {
        nvEvoUpdateAndKickOff(pDevEvo->pDispEvo[0], TRUE, &updateState,
                              TRUE);
    }
}
4331
EnableVblankSyncObject(struct NvKmsPerOpen * pOpen,void * pParamsVoid)4332 static NvBool EnableVblankSyncObject(
4333 struct NvKmsPerOpen *pOpen,
4334 void *pParamsVoid)
4335 {
4336 struct NvKmsEnableVblankSyncObjectParams *pParams = pParamsVoid;
4337 struct NvKmsPerOpenDisp* pOpenDisp = NULL;
4338 NVDispApiHeadStateEvoRec *pApiHeadState = NULL;
4339 NVDevEvoPtr pDevEvo = NULL;
4340 NvKmsVblankSyncObjectHandle vblankHandle = 0;
4341 int freeVblankSyncObjectIdx = 0;
4342 NvU32 apiHead = pParams->request.head;
4343 NVVblankSyncObjectRec *vblankSyncObjects = NULL;
4344 NVDispEvoPtr pDispEvo = NULL;
4345 NVEvoUpdateState updateState = { };
4346
4347 /* Obtain the Head State. */
4348 pOpenDisp = GetPerOpenDisp(pOpen, pParams->request.deviceHandle,
4349 pParams->request.dispHandle);
4350 if (pOpenDisp == NULL) {
4351 nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp.");
4352 return FALSE;
4353 }
4354
4355 pDispEvo = pOpenDisp->pDispEvo;
4356 pDevEvo = pDispEvo->pDevEvo;
4357
4358 /* Ensure Vblank Sync Object API is supported on this chip. */
4359 if (!pDevEvo->supportsSyncpts ||
4360 !pDevEvo->hal->caps.supportsVblankSyncObjects) {
4361 nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not "
4362 "supported on this chip.");
4363 return FALSE;
4364 }
4365
4366 /* Validate requested head because it comes from user input. */
4367 if (apiHead >= ARRAY_LEN(pDispEvo->apiHeadState)) {
4368 nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", apiHead);
4369 return FALSE;
4370 }
4371 pApiHeadState = &pDispEvo->apiHeadState[apiHead];
4372 vblankSyncObjects = pApiHeadState->vblankSyncObjects;
4373 pDevEvo = pDispEvo->pDevEvo;
4374
4375 /*
4376 * Find the available sync object. Sync Objects with handle=0 are not in
4377 * use.
4378 */
4379 for (freeVblankSyncObjectIdx = 0;
4380 freeVblankSyncObjectIdx < pApiHeadState->numVblankSyncObjectsCreated;
4381 freeVblankSyncObjectIdx++) {
4382 if (!vblankSyncObjects[freeVblankSyncObjectIdx].inUse) {
4383 break;
4384 }
4385 }
4386 if (freeVblankSyncObjectIdx == pApiHeadState->numVblankSyncObjectsCreated) {
4387 return FALSE;
4388 }
4389
4390 /* Save the created vblank handle if it is valid. */
4391 vblankHandle =
4392 nvEvoCreateApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead],
4393 &vblankSyncObjects[freeVblankSyncObjectIdx]);
4394 if (vblankHandle == 0) {
4395 nvEvoLogDebug(EVO_LOG_ERROR, "Unable to create vblank handle.");
4396 return FALSE;
4397 }
4398
4399 EnableAndSetupVblankSyncObject(pDispEvo, apiHead,
4400 &vblankSyncObjects[freeVblankSyncObjectIdx],
4401 &updateState);
4402 if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) {
4403 nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE);
4404 }
4405
4406 /* Populate the reply field. */
4407 pParams->reply.vblankHandle = vblankHandle;
4408 /* Note: the syncpt ID is NOT the same as the vblank handle. */
4409 pParams->reply.syncptId =
4410 pApiHeadState->vblankSyncObjects[freeVblankSyncObjectIdx].evoSyncpt.id;
4411
4412 return TRUE;
4413 }
4414
/*!
 * Mark a vblank sync object as free and, when the api-head is active, stage
 * the hardware teardown of its semaphore.
 *
 * The staged work is accumulated in pUpdateState; it is the caller's
 * responsibility to call nvEvoUpdateAndKickOff().
 */
static void DisableAndCleanVblankSyncObject(NVDispEvoRec *pDispEvo,
                                            const NvU32 apiHead,
                                            NVVblankSyncObjectRec *pVblankSyncObject,
                                            NVEvoUpdateState *pUpdateState)
{
    if (nvApiHeadIsActive(pDispEvo, apiHead)) {
        NvU32 head = nvGetPrimaryHwHead(pDispEvo, apiHead);

        nvAssert(head != NV_INVALID_HEAD);

        /*
         * Instruct the hardware to disable the semaphore corresponding to this
         * syncpt. The Update State will be populated.
         *
         * Note: Using dummy zero value for rasterLine because the disable
         * codepath in ConfigureVblankSyncObject() does not use that argument.
         */
        pDispEvo->pDevEvo->hal->ConfigureVblankSyncObject(pDispEvo->pDevEvo,
                                                          0, /* rasterLine */
                                                          head,
                                                          pVblankSyncObject->index,
                                                          NULL, /* pSurfaceDesc */
                                                          pUpdateState);
        /*
         * Note: it is the caller's responsibility to call
         * nvEvoUpdateAndKickOff().
         */
    }

    /* Always clear the bookkeeping, even when the head was inactive and no
     * HW programming was staged. */
    pVblankSyncObject->inUse = FALSE;
    pVblankSyncObject->enabled = FALSE;
}
4447
/*!
 * Disable and tear down every vblank sync object tracked by every ioctl
 * client of the given device.
 *
 * Mirror of EnableAndSetupVblankSyncObjectForAllOpens(): walks all per-open
 * handle tables, stages the HW teardown into a single update state, and
 * kicks it off once at the end.
 */
static void DisableAndCleanVblankSyncObjectForAllOpens(NVDevEvoRec *pDevEvo)
{
    /*
     * An NVEvoUpdateState has disp-scope, and we will only have
     * one disp when programming syncpts.
     */
    NVEvoUpdateState updateState = { };
    struct NvKmsPerOpen *pOpen;

    /* Nothing to do when the device/HAL has no vblank sync object support. */
    if (!pDevEvo->supportsSyncpts ||
        !pDevEvo->hal->caps.supportsVblankSyncObjects) {
        return;
    }

    /* If Syncpts are supported, we're on Orin, which only has one display. */
    nvAssert(pDevEvo->nDispEvo == 1);

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
        struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);
        struct NvKmsPerOpenDisp *pOpenDisp;
        NvKmsGenericHandle disp;

        /* Skip clients that have not opened this device. */
        if (pOpenDev == NULL) {
            continue;
        }

        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles,
                                            pOpenDisp, disp) {

            nvAssert(pOpenDisp->pDispEvo == pDevEvo->pDispEvo[0]);

            for (NvU32 apiHead = 0; apiHead <
                 ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); apiHead++) {
                NVEvoApiHandlesRec *pHandles =
                    &pOpenDisp->vblankSyncObjectHandles[apiHead];
                NVVblankSyncObjectRec *pVblankSyncObject;
                NvKmsVblankSyncObjectHandle handle;

                FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles,
                                                    pVblankSyncObject, handle) {
                    DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead,
                                                    pVblankSyncObject,
                                                    &updateState);
                }
            }
        }
    }

    /* Kick off all staged teardown in one update. */
    if (!nvIsUpdateStateEmpty(pDevEvo, &updateState)) {
        nvEvoUpdateAndKickOff(pDevEvo->pDispEvo[0], TRUE, &updateState,
                              TRUE);
    }
}
4501
DisableVblankSyncObject(struct NvKmsPerOpen * pOpen,void * pParamsVoid)4502 static NvBool DisableVblankSyncObject(
4503 struct NvKmsPerOpen *pOpen,
4504 void *pParamsVoid)
4505 {
4506 struct NvKmsDisableVblankSyncObjectParams *pParams = pParamsVoid;
4507 struct NvKmsPerOpenDisp* pOpenDisp =
4508 GetPerOpenDisp(pOpen, pParams->request.deviceHandle,
4509 pParams->request.dispHandle);
4510 NVVblankSyncObjectRec *pVblankSyncObject = NULL;
4511 NvU32 apiHead = pParams->request.head;
4512 NVDevEvoPtr pDevEvo = NULL;
4513 NVEvoUpdateState updateState = { };
4514
4515 if (pOpenDisp == NULL) {
4516 nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp.");
4517 return FALSE;
4518 }
4519
4520 pDevEvo = pOpenDisp->pDispEvo->pDevEvo;
4521
4522 /* Ensure Vblank Sync Object API is supported on this chip. */
4523 if (!pDevEvo->supportsSyncpts ||
4524 !pDevEvo->hal->caps.supportsVblankSyncObjects) {
4525 nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not "
4526 "supported on this chip.");
4527 return FALSE;
4528 }
4529
4530 /* Validate requested head because it comes from user input. */
4531 if (apiHead >= ARRAY_LEN(pOpenDisp->pDispEvo->apiHeadState)) {
4532 nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", apiHead);
4533 return FALSE;
4534 }
4535
4536 /* Mark the indicated object as free. */
4537 pVblankSyncObject =
4538 nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead],
4539 pParams->request.vblankHandle);
4540 if (pVblankSyncObject == NULL) {
4541 nvEvoLogDebug(EVO_LOG_ERROR, "unable to find object with provided "
4542 "handle.");
4543 return FALSE;
4544 }
4545
4546 DisableAndCleanVblankSyncObject(pOpenDisp->pDispEvo, apiHead,
4547 pVblankSyncObject, &updateState);
4548
4549 if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) {
4550 /*
4551 * Instruct hardware to execute the staged commands from the
4552 * ConfigureVblankSyncObject() call inside of the
4553 * DisableAndCleanVblankSyncObject() call above. This will set up and
4554 * wait for a notification that the hardware execution has completed.
4555 */
4556 nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, TRUE);
4557 }
4558
4559 /* Remove the handle from the map. */
4560 nvEvoDestroyApiHandle(&pOpenDisp->vblankSyncObjectHandles[apiHead],
4561 pParams->request.vblankHandle);
4562
4563 return TRUE;
4564 }
4565
/*!
 * Vblank callback used by NotifyVblank(): fires the unicast event exactly
 * once, then tears itself down.
 *
 * \param[in] pDispEvo       The disp on which the vblank occurred (unused
 *                           here; required by the callback signature).
 * \param[in] pCallbackData  Callback record whose pUserData is the unicast
 *                           event per-open set up by NotifyVblank().
 */
static void NotifyVblankCallback(NVDispEvoRec *pDispEvo,
                                 NVVBlankCallbackPtr pCallbackData)
{
    struct NvKmsPerOpen *pEventOpenFd = pCallbackData->pUserData;

    /*
     * NOTIFY_VBLANK events are single-shot so notify the unicast FD, then
     * immediately unregister the callback. The unregister step is done in
     * nvRemoveUnicastEvent which resets the unicast event data.
     */
    nvSendUnicastEvent(pEventOpenFd);
    nvRemoveUnicastEvent(pEventOpenFd);
}
4579
NotifyVblank(struct NvKmsPerOpen * pOpen,void * pParamsVoid)4580 static NvBool NotifyVblank(
4581 struct NvKmsPerOpen *pOpen,
4582 void *pParamsVoid)
4583 {
4584 struct NvKmsNotifyVblankParams *pParams = pParamsVoid;
4585 struct NvKmsPerOpen *pEventOpenFd = NULL;
4586 NVVBlankCallbackPtr pCallbackData = NULL;
4587 struct NvKmsPerOpenDisp* pOpenDisp =
4588 GetPerOpenDisp(pOpen, pParams->request.deviceHandle,
4589 pParams->request.dispHandle);
4590 const NvU32 apiHead = pParams->request.head;
4591
4592 pEventOpenFd = nvkms_get_per_open_data(pParams->request.unicastEvent.fd);
4593
4594 if (pEventOpenFd == NULL) {
4595 return NV_FALSE;
4596 }
4597
4598 if (!PerOpenIsValidForUnicastEvent(pEventOpenFd)) {
4599 return NV_FALSE;
4600 }
4601
4602 pEventOpenFd->type = NvKmsPerOpenTypeUnicastEvent;
4603
4604 pCallbackData = nvApiHeadRegisterVBlankCallback(pOpenDisp->pDispEvo,
4605 apiHead,
4606 NotifyVblankCallback,
4607 pEventOpenFd);
4608 if (pCallbackData == NULL) {
4609 return NV_FALSE;
4610 }
4611
4612 pEventOpenFd->unicastEvent.type = NvKmsUnicastEventTypeVblankNotification;
4613 pEventOpenFd->unicastEvent.e.vblankNotification.pOpenDisp = pOpenDisp;
4614 pEventOpenFd->unicastEvent.e.vblankNotification.apiHead = apiHead;
4615 pEventOpenFd->unicastEvent.e.vblankNotification.hCallback
4616 = nvEvoCreateApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead],
4617 pCallbackData);
4618
4619 if (pEventOpenFd->unicastEvent.e.vblankNotification.hCallback == 0) {
4620 nvApiHeadUnregisterVBlankCallback(pOpenDisp->pDispEvo, pCallbackData);
4621 return NV_FALSE;
4622 }
4623
4624 return NV_TRUE;
4625 }
4626
/*!
 * Validate a fliplock-group request spanning one or more devices, then hand
 * it to nvSetFlipLockGroup().
 *
 * Validation performed here: each listed device handle resolves; the caller
 * has sub-owner permission or better on every device; no device appears
 * twice; the disp and head bitmasks contain no out-of-range bits; and every
 * requested api-head is currently active.
 */
static NvBool SetFlipLockGroup(
    struct NvKmsPerOpen *pOpen,
    void *pParamsVoid)
{
    struct NvKmsSetFlipLockGroupParams *pParams = pParamsVoid;
    const struct NvKmsSetFlipLockGroupRequest *pRequest = &pParams->request;
    /* Fill in this array as we look up the pDevEvo from the given device
     * handles, so that later processing can use it without converting
     * deviceHandle -> pDevEvo again. */
    NVDevEvoPtr pDevEvo[NV_MAX_SUBDEVICES] = { };
    NvU32 dev;

    /* Ensure we don't overrun the pDevEvo array. */
    ct_assert(ARRAY_LEN(pRequest->dev) == NV_MAX_SUBDEVICES);

    for (dev = 0; dev < ARRAY_LEN(pRequest->dev); dev++) {
        const struct NvKmsSetFlipLockGroupOneDev *pRequestDev =
            &pRequest->dev[dev];
        struct NvKmsPerOpenDev *pOpenDev = NULL;
        NVDispEvoPtr pDispEvo;
        NvU32 dispIndex;
        NvU32 i;

        /* An empty disp mask terminates the list of requested devices. */
        if (pRequestDev->requestedDispsBitMask == 0) {
            break;
        }

        pOpenDev = GetPerOpenDev(pOpen, pRequestDev->deviceHandle);

        if (pOpenDev == NULL) {
            return FALSE;
        }

        pDevEvo[dev] = pOpenDev->pDevEvo;

        /* The caller must be the modeset owner for every specified device. */
        if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
            return FALSE;
        }

        /* Do not allow the same device to be specified twice. */
        for (i = 0; i < dev; i++) {
            if (pDevEvo[i] == pDevEvo[dev]) {
                return FALSE;
            }
        }

        /* Check for invalid disps in requestedDispsBitMask. */
        if (nvHasBitAboveMax(pRequestDev->requestedDispsBitMask,
                             pDevEvo[dev]->nDispEvo)) {
            return FALSE;
        }

        /* Check for invalid heads in requestedHeadsBitMask.
         * NOTE(review): this loop visits every disp of the device, not only
         * those in requestedDispsBitMask, and rejects any disp with an empty
         * head mask — confirm that this is the intended contract. */
        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo[dev]) {
            const NvU32 requestedHeadsBitMask =
                pRequestDev->disp[dispIndex].requestedHeadsBitMask;
            NvU32 apiHead;

            if (requestedHeadsBitMask == 0) {
                return FALSE;
            }
            if (nvHasBitAboveMax(requestedHeadsBitMask,
                                 pDevEvo[dev]->numHeads)) {
                return FALSE;
            }

            /*
             * Verify that all API heads in requestedHeadsBitMask are active.
             * The requested fliplock group will be implicitly disabled if any of
             * these heads are specified in a modeset.
             */
            for (apiHead = 0; apiHead < pDevEvo[dev]->numHeads; apiHead++) {
                if ((requestedHeadsBitMask & (1 << apiHead)) != 0) {
                    if (!nvApiHeadIsActive(pDispEvo, apiHead)) {
                        return FALSE;
                    }
                }
            }
        }
    }

    /* Verify that at least one device was specified */
    if (pDevEvo[0] == NULL) {
        return FALSE;
    }

    return nvSetFlipLockGroup(pDevEvo, pRequest);
}
4716
EnableVblankSemControl(struct NvKmsPerOpen * pOpen,void * pParamsVoid)4717 static NvBool EnableVblankSemControl(
4718 struct NvKmsPerOpen *pOpen,
4719 void *pParamsVoid)
4720 {
4721 struct NvKmsEnableVblankSemControlParams *pParams = pParamsVoid;
4722 struct NvKmsPerOpenDev *pOpenDev;
4723 struct NvKmsPerOpenDisp *pOpenDisp;
4724 NVDevEvoPtr pDevEvo;
4725 NVDispEvoRec *pDispEvo;
4726 NVSurfaceEvoPtr pSurfaceEvo;
4727 NVVblankSemControl *pVblankSemControl;
4728 NvKmsVblankSemControlHandle vblankSemControlHandle;
4729 NvU32 hwHead;
4730
4731 if (!GetPerOpenDevAndDisp(pOpen,
4732 pParams->request.deviceHandle,
4733 pParams->request.dispHandle,
4734 &pOpenDev,
4735 &pOpenDisp)) {
4736 return FALSE;
4737 }
4738
4739 pDevEvo = pOpenDev->pDevEvo;
4740 pDispEvo = pOpenDisp->pDispEvo;
4741
4742 pSurfaceEvo =
4743 nvEvoGetSurfaceFromHandleNoDispHWAccessOk(
4744 pDevEvo,
4745 &pOpenDev->surfaceHandles,
4746 pParams->request.surfaceHandle);
4747
4748 if (pSurfaceEvo == NULL) {
4749 return FALSE;
4750 }
4751
4752 hwHead = nvGetPrimaryHwHead(pDispEvo, pParams->request.head);
4753
4754 if (hwHead == NV_INVALID_HEAD) {
4755 return FALSE;
4756 }
4757
4758 pVblankSemControl = nvEvoEnableVblankSemControl(
4759 pDevEvo,
4760 pDispEvo,
4761 hwHead,
4762 pSurfaceEvo,
4763 pParams->request.surfaceOffset);
4764
4765 if (pVblankSemControl == NULL) {
4766 return FALSE;
4767 }
4768
4769 vblankSemControlHandle =
4770 nvEvoCreateApiHandle(&pOpenDisp->vblankSemControlHandles,
4771 pVblankSemControl);
4772
4773 if (vblankSemControlHandle == 0) {
4774 (void)nvEvoDisableVblankSemControl(pDevEvo, pVblankSemControl);
4775 return FALSE;
4776 }
4777
4778 pParams->reply.vblankSemControlHandle = vblankSemControlHandle;
4779
4780 return TRUE;
4781 }
4782
DisableVblankSemControl(struct NvKmsPerOpen * pOpen,void * pParamsVoid)4783 static NvBool DisableVblankSemControl(
4784 struct NvKmsPerOpen *pOpen,
4785 void *pParamsVoid)
4786 {
4787 const struct NvKmsDisableVblankSemControlParams *pParams = pParamsVoid;
4788 struct NvKmsPerOpenDev *pOpenDev;
4789 struct NvKmsPerOpenDisp *pOpenDisp;
4790 NVDevEvoPtr pDevEvo;
4791 NVVblankSemControl *pVblankSemControl;
4792 NvBool ret;
4793
4794 if (!GetPerOpenDevAndDisp(pOpen,
4795 pParams->request.deviceHandle,
4796 pParams->request.dispHandle,
4797 &pOpenDev,
4798 &pOpenDisp)) {
4799 return FALSE;
4800 }
4801
4802 pDevEvo = pOpenDev->pDevEvo;
4803
4804 pVblankSemControl =
4805 nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankSemControlHandles,
4806 pParams->request.vblankSemControlHandle);
4807 if (pVblankSemControl == NULL) {
4808 return FALSE;
4809 }
4810
4811 ret = nvEvoDisableVblankSemControl(pDevEvo, pVblankSemControl);
4812
4813 if (ret) {
4814 nvEvoDestroyApiHandle(&pOpenDisp->vblankSemControlHandles,
4815 pParams->request.vblankSemControlHandle);
4816 }
4817
4818 return ret;
4819 }
4820
AccelVblankSemControls(struct NvKmsPerOpen * pOpen,void * pParamsVoid)4821 static NvBool AccelVblankSemControls(
4822 struct NvKmsPerOpen *pOpen,
4823 void *pParamsVoid)
4824 {
4825 const struct NvKmsAccelVblankSemControlsParams *pParams = pParamsVoid;
4826 struct NvKmsPerOpenDev *pOpenDev;
4827 struct NvKmsPerOpenDisp *pOpenDisp;
4828 NVDevEvoPtr pDevEvo;
4829 const NVDispEvoRec *pDispEvo;
4830 NvU32 apiHead, hwHeadMask = 0;
4831
4832 if (!GetPerOpenDevAndDisp(pOpen,
4833 pParams->request.deviceHandle,
4834 pParams->request.dispHandle,
4835 &pOpenDev,
4836 &pOpenDisp)) {
4837 return FALSE;
4838 }
4839
4840 if (!nvKmsOpenDevHasSubOwnerPermissionOrBetter(pOpenDev)) {
4841 return FALSE;
4842 }
4843
4844 pDevEvo = pOpenDev->pDevEvo;
4845 pDispEvo = pOpenDisp->pDispEvo;
4846
4847 FOR_ALL_HEADS(apiHead, pParams->request.headMask) {
4848 NvU32 hwHead = nvGetPrimaryHwHead(pDispEvo, apiHead);
4849
4850 if (hwHead != NV_INVALID_HEAD) {
4851 hwHeadMask |= NVBIT(hwHead);
4852 }
4853 }
4854
4855 return nvEvoAccelVblankSemControls(
4856 pDevEvo,
4857 pDispEvo->displayOwner,
4858 hwHeadMask);
4859 }
4860
4861 /*!
4862 * Perform the ioctl operation requested by the client.
4863 *
4864 * \param[in,out] pOpenVoid The per-open data, allocated by
4865 * nvKmsOpen().
4866 * \param[in] cmdOpaque The NVKMS_IOCTL_ operation to perform.
4867 * \param[in,out] paramsAddress A pointer, in the client process's
4868 * address space, to the parameter
4869 * structure. This is cmd-specific.
4870 * \param[in] paramSize The client-specified size of the params.
4871 *
4872 * \return Return TRUE if the ioctl operation was successfully
4873 * performed. Otherwise, return FALSE.
4874 */
NvBool nvKmsIoctl(
    void *pOpenVoid,
    const NvU32 cmdOpaque,
    const NvU64 paramsAddress,
    const size_t paramSize)
{
    /*
     * Dispatch table, indexed by NVKMS_IOCTL_ command.  Each entry records
     * the handler, the total params size (used to validate the client's
     * paramSize), the offsets/sizes of the request and reply sub-structures
     * (so only those regions are copied in/out for user-space clients), and
     * optional prepUser/doneUser hooks with per-command extra state for
     * commands that embed further user pointers in their params.
     */
    static const struct {

        NvBool (*proc)(struct NvKmsPerOpen *pOpen, void *pParamsVoid);
        NvBool (*prepUser)(void *pParamsVoid, void *pExtraStateVoid);
        NvBool (*doneUser)(void *pParamsVoid, void *pExtraStateVoid);
        const size_t paramSize;
        /* Size of extra state tracked for user parameters */
        const size_t extraSize;

        const size_t requestSize;
        const size_t requestOffset;

        const size_t replySize;
        const size_t replyOffset;

    } dispatch[] = {

#define _ENTRY_WITH_USER(_cmd, _func, _prepUser, _doneUser, _extraSize) \
    [_cmd] = {                                                       \
        .proc          = _func,                                      \
        .prepUser      = _prepUser,                                  \
        .doneUser      = _doneUser,                                  \
        .paramSize     = sizeof(struct NvKms##_func##Params),        \
        .requestSize   = sizeof(struct NvKms##_func##Request),       \
        .requestOffset = offsetof(struct NvKms##_func##Params, request), \
        .replySize     = sizeof(struct NvKms##_func##Reply),         \
        .replyOffset   = offsetof(struct NvKms##_func##Params, reply), \
        .extraSize     = _extraSize,                                 \
    }

#define ENTRY(_cmd, _func)                                           \
    _ENTRY_WITH_USER(_cmd, _func, NULL, NULL, 0)

#define ENTRY_CUSTOM_USER(_cmd, _func)                               \
    _ENTRY_WITH_USER(_cmd, _func,                                    \
                     _func##PrepUser, _func##DoneUser,               \
                     sizeof(struct NvKms##_func##ExtraUserState))

        ENTRY(NVKMS_IOCTL_ALLOC_DEVICE, AllocDevice),
        ENTRY(NVKMS_IOCTL_FREE_DEVICE, FreeDevice),
        ENTRY(NVKMS_IOCTL_QUERY_DISP, QueryDisp),
        ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, QueryConnectorStaticData),
        ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA, QueryConnectorDynamicData),
        ENTRY(NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, QueryDpyStaticData),
        ENTRY(NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, QueryDpyDynamicData),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE_INDEX, ValidateModeIndex),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE, ValidateMode),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_MODE, SetMode),
        ENTRY(NVKMS_IOCTL_SET_CURSOR_IMAGE, SetCursorImage),
        ENTRY(NVKMS_IOCTL_MOVE_CURSOR, MoveCursor),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_LUT, SetLut),
        ENTRY(NVKMS_IOCTL_IDLE_BASE_CHANNEL, IdleBaseChannel),
        ENTRY_CUSTOM_USER(NVKMS_IOCTL_FLIP, Flip),
        ENTRY(NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST,
              DeclareDynamicDpyInterest),
        ENTRY(NVKMS_IOCTL_REGISTER_SURFACE, RegisterSurface),
        ENTRY(NVKMS_IOCTL_UNREGISTER_SURFACE, UnregisterSurface),
        ENTRY(NVKMS_IOCTL_GRANT_SURFACE, GrantSurface),
        ENTRY(NVKMS_IOCTL_ACQUIRE_SURFACE, AcquireSurface),
        ENTRY(NVKMS_IOCTL_RELEASE_SURFACE, ReleaseSurface),
        ENTRY(NVKMS_IOCTL_SET_DPY_ATTRIBUTE, SetDpyAttribute),
        ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE, GetDpyAttribute),
        ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES,
              GetDpyAttributeValidValues),
        ENTRY(NVKMS_IOCTL_SET_DISP_ATTRIBUTE, SetDispAttribute),
        ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE, GetDispAttribute),
        ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES,
              GetDispAttributeValidValues),
        ENTRY(NVKMS_IOCTL_QUERY_FRAMELOCK, QueryFrameLock),
        ENTRY(NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE, SetFrameLockAttribute),
        ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE, GetFrameLockAttribute),
        ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES,
              GetFrameLockAttributeValidValues),
        ENTRY(NVKMS_IOCTL_GET_NEXT_EVENT, GetNextEvent),
        ENTRY(NVKMS_IOCTL_DECLARE_EVENT_INTEREST, DeclareEventInterest),
        ENTRY(NVKMS_IOCTL_CLEAR_UNICAST_EVENT, ClearUnicastEvent),
        ENTRY(NVKMS_IOCTL_SET_LAYER_POSITION, SetLayerPosition),
        ENTRY(NVKMS_IOCTL_GRAB_OWNERSHIP, GrabOwnership),
        ENTRY(NVKMS_IOCTL_RELEASE_OWNERSHIP, ReleaseOwnership),
        ENTRY(NVKMS_IOCTL_GRANT_PERMISSIONS, GrantPermissions),
        ENTRY(NVKMS_IOCTL_ACQUIRE_PERMISSIONS, AcquirePermissions),
        ENTRY(NVKMS_IOCTL_REVOKE_PERMISSIONS, RevokePermissions),
        ENTRY(NVKMS_IOCTL_QUERY_DPY_CRC32, QueryDpyCRC32),
        ENTRY(NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO,
              RegisterDeferredRequestFifo),
        ENTRY(NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO,
              UnregisterDeferredRequestFifo),
        ENTRY(NVKMS_IOCTL_ALLOC_SWAP_GROUP, AllocSwapGroup),
        ENTRY(NVKMS_IOCTL_FREE_SWAP_GROUP, FreeSwapGroup),
        ENTRY(NVKMS_IOCTL_JOIN_SWAP_GROUP, JoinSwapGroup),
        ENTRY(NVKMS_IOCTL_LEAVE_SWAP_GROUP, LeaveSwapGroup),
        ENTRY(NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST, SetSwapGroupClipList),
        ENTRY(NVKMS_IOCTL_GRANT_SWAP_GROUP, GrantSwapGroup),
        ENTRY(NVKMS_IOCTL_ACQUIRE_SWAP_GROUP, AcquireSwapGroup),
        ENTRY(NVKMS_IOCTL_RELEASE_SWAP_GROUP, ReleaseSwapGroup),
        ENTRY(NVKMS_IOCTL_SWITCH_MUX, SwitchMux),
        ENTRY(NVKMS_IOCTL_GET_MUX_STATE, GetMuxState),
        ENTRY(NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE, ExportVrrSemaphoreSurface),
        ENTRY(NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT, EnableVblankSyncObject),
        ENTRY(NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT, DisableVblankSyncObject),
        ENTRY(NVKMS_IOCTL_NOTIFY_VBLANK, NotifyVblank),
        ENTRY(NVKMS_IOCTL_SET_FLIPLOCK_GROUP, SetFlipLockGroup),
        ENTRY(NVKMS_IOCTL_ENABLE_VBLANK_SEM_CONTROL, EnableVblankSemControl),
        ENTRY(NVKMS_IOCTL_DISABLE_VBLANK_SEM_CONTROL, DisableVblankSemControl),
        ENTRY(NVKMS_IOCTL_ACCEL_VBLANK_SEM_CONTROLS, AccelVblankSemControls),
    };

    struct NvKmsPerOpen *pOpen = pOpenVoid;
    void *pParamsKernelPointer;
    NvBool ret;
    enum NvKmsIoctlCommand cmd = cmdOpaque;
    void *pExtraUserState = NULL;

    if (!AssignNvKmsPerOpenType(pOpen, NvKmsPerOpenTypeIoctl, TRUE)) {
        return FALSE;
    }

    /* Reject unknown commands: out of table range, or a hole in the table
     * (sparse designated initializers leave .proc NULL). */
    if (cmd >= ARRAY_LEN(dispatch)) {
        return FALSE;
    }

    if (dispatch[cmd].proc == NULL) {
        return FALSE;
    }

    /* The client must pass exactly the params size this command expects. */
    if (paramSize != dispatch[cmd].paramSize) {
        return FALSE;
    }

    if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) {
        /* User-space client: build a zeroed kernel copy of the params,
         * with the per-command extra user state appended after it. */
        pParamsKernelPointer = nvCalloc(1, paramSize + dispatch[cmd].extraSize);
        if (pParamsKernelPointer == NULL) {
            return FALSE;
        }

        /* Copy in only the request portion of the params. */
        if (dispatch[cmd].requestSize > 0) {
            int status =
                nvkms_copyin((char *) pParamsKernelPointer +
                             dispatch[cmd].requestOffset,
                             paramsAddress + dispatch[cmd].requestOffset,
                             dispatch[cmd].requestSize);
            if (status != 0) {
                nvFree(pParamsKernelPointer);
                return FALSE;
            }
        }

        /* Let the command pull in any embedded user pointers. */
        if (dispatch[cmd].prepUser) {
            pExtraUserState = (char *)pParamsKernelPointer + paramSize;

            if (!dispatch[cmd].prepUser(pParamsKernelPointer,
                                        pExtraUserState)) {
                nvFree(pParamsKernelPointer);
                return FALSE;
            }
        }
    } else {
        /* Kernel-space client: the params are directly addressable. */
        pParamsKernelPointer = nvKmsNvU64ToPointer(paramsAddress);
    }

    ret = dispatch[cmd].proc(pOpen, pParamsKernelPointer);

    if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) {

        /* doneUser runs even when proc failed, to release prepUser state. */
        if (dispatch[cmd].doneUser) {
            pExtraUserState = (char *)pParamsKernelPointer + paramSize;

            if (!dispatch[cmd].doneUser(pParamsKernelPointer,
                                        pExtraUserState)) {
                ret = FALSE;
            }
        }

        /* Copy out only the reply portion of the params. */
        if (dispatch[cmd].replySize > 0) {
            int status =
                nvkms_copyout(paramsAddress + dispatch[cmd].replyOffset,
                              (char *) pParamsKernelPointer +
                              dispatch[cmd].replyOffset,
                              dispatch[cmd].replySize);
            if (status != 0) {
                ret = FALSE;
            }
        }

        nvFree(pParamsKernelPointer);
    }

    return ret;
}
5070
5071
5072 /*!
5073 * Close callback.
5074 *
5075 * \param[in,out] pOpenVoid The per-open data, allocated by nvKmsOpen().
5076 */
nvKmsClose(void * pOpenVoid)5077 void nvKmsClose(void *pOpenVoid)
5078 {
5079 struct NvKmsPerOpen *pOpen = pOpenVoid;
5080
5081 if (pOpen == NULL) {
5082 return;
5083 }
5084
5085 /*
5086 * First remove the pOpen from global tracking. Otherwise, assertions can
5087 * fail in the free paths below -- the assertions check that the object
5088 * being freed is not tracked by any pOpen.
5089 */
5090 nvListDel(&pOpen->perOpenListEntry);
5091
5092 if (pOpen->type == NvKmsPerOpenTypeIoctl) {
5093
5094 struct NvKmsPerOpenEventListEntry *pEntry, *pEntryTmp;
5095 struct NvKmsPerOpenDev *pOpenDev;
5096 NvKmsGenericHandle dev;
5097
5098 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
5099 pOpenDev, dev) {
5100 FreeDeviceReference(pOpen, pOpenDev);
5101 }
5102
5103 nvEvoDestroyApiHandles(&pOpen->ioctl.frameLockHandles);
5104
5105 nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles);
5106
5107 nvListForEachEntry_safe(pEntry, pEntryTmp,
5108 &pOpen->ioctl.eventList, eventListEntry) {
5109 nvListDel(&pEntry->eventListEntry);
5110 nvFree(pEntry);
5111 }
5112
5113 nvListDel(&pOpen->perOpenIoctlListEntry);
5114 }
5115
5116 if (pOpen->type == NvKmsPerOpenTypeGrantSurface) {
5117 nvAssert(pOpen->grantSurface.pSurfaceEvo != NULL);
5118 nvEvoDecrementSurfaceStructRefCnt(pOpen->grantSurface.pSurfaceEvo);
5119 }
5120
5121 if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) {
5122 nvAssert(pOpen->grantSwapGroup.pSwapGroup != NULL);
5123 nvHsDecrementSwapGroupRefCnt(pOpen->grantSwapGroup.pSwapGroup);
5124 }
5125
5126 if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) {
5127 nvRemoveUnicastEvent(pOpen);
5128 }
5129
5130 nvFree(pOpen);
5131 }
5132
5133
5134 /*!
5135 * Open callback.
5136 *
5137 * Allocate, initialize, and return an opaque pointer to an NvKmsPerOpen.
5138 *
5139 * \return If successful, return an NvKmsPerOpen pointer. Otherwise,
5140 * return NULL.
5141 */
void *nvKmsOpen(
    NvU32 pid,
    enum NvKmsClientType clientType,
    nvkms_per_open_handle_t *pOpenKernel)
{
    struct NvKmsPerOpen *pOpen = nvCalloc(1, sizeof(*pOpen));

    /*
     * On allocation failure there is nothing to tear down (nvKmsClose()
     * treats NULL as a no-op anyway), so just return NULL directly.
     */
    if (pOpen == NULL) {
        return NULL;
    }

    pOpen->pid = pid;
    pOpen->clientType = clientType;
    pOpen->type = NvKmsPerOpenTypeUndefined;
    pOpen->pOpenKernel = pOpenKernel;

    nvListAppend(&pOpen->perOpenListEntry, &perOpenList);

    return pOpen;
}
5166
5167 extern const char *const pNV_KMS_ID;
5168
5169 #if NVKMS_PROCFS_ENABLE
5170
ProcFsPerOpenTypeString(enum NvKmsPerOpenType type)5171 static const char *ProcFsPerOpenTypeString(
5172 enum NvKmsPerOpenType type)
5173 {
5174 switch (type) {
5175 case NvKmsPerOpenTypeIoctl: return "ioctl";
5176 case NvKmsPerOpenTypeGrantSurface: return "grantSurface";
5177 case NvKmsPerOpenTypeGrantSwapGroup: return "grantSwapGroup";
5178 case NvKmsPerOpenTypeGrantPermissions: return "grantPermissions";
5179 case NvKmsPerOpenTypeUnicastEvent: return "unicastEvent";
5180 case NvKmsPerOpenTypeUndefined: return "undefined";
5181 }
5182
5183 return "unknown";
5184 }
5185
ProcFsUnicastEventTypeString(enum NvKmsUnicastEventType type)5186 static const char *ProcFsUnicastEventTypeString(
5187 enum NvKmsUnicastEventType type)
5188 {
5189 switch (type) {
5190 case NvKmsUnicastEventTypeDeferredRequest: return "DeferredRequest";
5191 case NvKmsUnicastEventTypeVblankNotification: return "VblankNotification";
5192 case NvKmsUnicastEventTypeUndefined: return "undefined";
5193 }
5194
5195 return "unknown";
5196 }
5197
ProcFsPerOpenClientTypeString(enum NvKmsClientType clientType)5198 static const char *ProcFsPerOpenClientTypeString(
5199 enum NvKmsClientType clientType)
5200 {
5201 switch (clientType) {
5202 case NVKMS_CLIENT_USER_SPACE: return "user-space";
5203 case NVKMS_CLIENT_KERNEL_SPACE: return "kernel-space";
5204 }
5205
5206 return "unknown";
5207 }
5208
ProcFsPermissionsTypeString(enum NvKmsPermissionsType permissionsType)5209 static const char *ProcFsPermissionsTypeString(
5210 enum NvKmsPermissionsType permissionsType)
5211 {
5212 switch (permissionsType) {
5213 case NV_KMS_PERMISSIONS_TYPE_FLIPPING: return "flipping";
5214 case NV_KMS_PERMISSIONS_TYPE_MODESET: return "modeset";
5215 case NV_KMS_PERMISSIONS_TYPE_SUB_OWNER:return "sub-owner";
5216 }
5217
5218 return "unknown";
5219 }
5220
5221 static void
ProcFsPrintClients(void * data,char * buffer,size_t size,nvkms_procfs_out_string_func_t * outString)5222 ProcFsPrintClients(
5223 void *data,
5224 char *buffer,
5225 size_t size,
5226 nvkms_procfs_out_string_func_t *outString)
5227 {
5228 struct NvKmsPerOpen *pOpen;
5229 NVEvoInfoStringRec infoString;
5230
5231 nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) {
5232
5233 const char *extra = "";
5234
5235 nvInitInfoString(&infoString, buffer, size);
5236
5237 if (pOpen == nvEvoGlobal.nvKmsPerOpen) {
5238 extra = " (NVKMS-internal client)";
5239 }
5240
5241 nvEvoLogInfoString(&infoString,
5242 "Client (pOpen) : %p", pOpen);
5243 nvEvoLogInfoString(&infoString,
5244 " pid : %d%s", pOpen->pid, extra);
5245 nvEvoLogInfoString(&infoString,
5246 " clientType : %s",
5247 ProcFsPerOpenClientTypeString(pOpen->clientType));
5248 nvEvoLogInfoString(&infoString,
5249 " type : %s",
5250 ProcFsPerOpenTypeString(pOpen->type));
5251
5252 if (pOpen->type == NvKmsPerOpenTypeIoctl) {
5253
5254 NvKmsGenericHandle deviceHandle;
5255 struct NvKmsPerOpenDev *pOpenDev;
5256
5257 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
5258 pOpenDev, deviceHandle) {
5259 NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;
5260
5261 nvEvoLogInfoString(&infoString,
5262 " pDevEvo (deviceId:%02d) : %p",
5263 pDevEvo->deviceId, pDevEvo);
5264 nvEvoLogInfoString(&infoString,
5265 " NvKmsDeviceHandle : %d", deviceHandle);
5266 }
5267
5268 } else if (pOpen->type == NvKmsPerOpenTypeGrantSurface) {
5269
5270 NVSurfaceEvoPtr pSurfaceEvo = pOpen->grantSurface.pSurfaceEvo;
5271
5272 nvEvoLogInfoString(&infoString,
5273 " pSurfaceEvo : %p", pSurfaceEvo);
5274
5275 } else if (pOpen->type == NvKmsPerOpenTypeGrantPermissions) {
5276
5277 NVDevEvoPtr pDevEvo = pOpen->grantPermissions.pDevEvo;
5278 const struct NvKmsPermissions *pPerms =
5279 &pOpen->grantPermissions.permissions;
5280
5281 nvEvoLogInfoString(&infoString,
5282 " pDevEvo (deviceId:%02d) : %p",
5283 pDevEvo->deviceId, pDevEvo);
5284
5285 nvEvoLogInfoString(&infoString,
5286 " PermissionsType : %s",
5287 ProcFsPermissionsTypeString(pPerms->type));
5288
5289 if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) {
5290 NvU32 d, h;
5291
5292 for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) {
5293 for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) {
5294
5295 const NvU8 layerMask =
5296 pPerms->flip.disp[d].head[h].layerMask;
5297
5298 if (layerMask == 0) {
5299 continue;
5300 }
5301
5302 nvEvoLogInfoString(&infoString,
5303 " disp:%02d, head:%02d : 0x%08x", d, h,
5304 layerMask);
5305 }
5306 }
5307 } else if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_MODESET) {
5308 NvU32 d, h;
5309
5310 for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) {
5311 for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) {
5312
5313 NVDpyIdList dpyIdList =
5314 pPerms->modeset.disp[d].head[h].dpyIdList;
5315 NVDispEvoPtr pDispEvo;
5316 char *dpys;
5317
5318 if (nvDpyIdListIsEmpty(dpyIdList)) {
5319 continue;
5320 }
5321
5322 pDispEvo = pDevEvo->pDispEvo[d];
5323
5324 dpys = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList);
5325
5326 if (dpys == NULL) {
5327 continue;
5328 }
5329
5330 nvEvoLogInfoString(&infoString,
5331 " disp:%02d, head:%02d : %s", d, h, dpys);
5332
5333 nvFree(dpys);
5334 }
5335 }
5336 }
5337 } else if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) {
5338
5339 NVDevEvoPtr pDevEvo = pOpen->grantSwapGroup.pDevEvo;
5340
5341 nvEvoLogInfoString(&infoString,
5342 " pDevEvo (deviceId:%02d) : %p",
5343 pDevEvo->deviceId, pDevEvo);
5344 nvEvoLogInfoString(&infoString,
5345 " pSwapGroup : %p",
5346 pOpen->grantSwapGroup.pSwapGroup);
5347
5348 } else if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) {
5349 nvEvoLogInfoString(&infoString,
5350 " unicastEvent type : %s",
5351 ProcFsUnicastEventTypeString(pOpen->unicastEvent.type));
5352 switch(pOpen->unicastEvent.type) {
5353 case NvKmsUnicastEventTypeDeferredRequest:
5354 nvEvoLogInfoString(&infoString,
5355 " pDeferredRequestFifo : %p",
5356 pOpen->unicastEvent.e.deferred.pDeferredRequestFifo);
5357 break;
5358 case NvKmsUnicastEventTypeVblankNotification:
5359 nvEvoLogInfoString(&infoString,
5360 " head : %x",
5361 pOpen->unicastEvent.e.vblankNotification.apiHead);
5362 break;
5363 default:
5364 break;
5365 }
5366 }
5367
5368 nvEvoLogInfoString(&infoString, "");
5369 outString(data, buffer);
5370 }
5371 }
5372
/*!
 * Print per-plane details for each valid plane of pSurfaceEvo: the display
 * ctxdma handle, pitch, byte offset, and RM object size.
 */
static void PrintSurfacePlanes(
    NVEvoInfoStringRec *pInfoString,
    const NVSurfaceEvoRec *pSurfaceEvo)
{
    NvU8 planeIndex;

    FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) {
        nvEvoLogInfoString(pInfoString,
            "plane[%u] disp ctxDma:0x%08x pitch:%u offset:%" NvU64_fmtu
            " rmObjectSizeInBytes:%" NvU64_fmtu,
            planeIndex,
            pSurfaceEvo->planes[planeIndex].surfaceDesc.ctxDmaHandle,
            pSurfaceEvo->planes[planeIndex].pitch,
            pSurfaceEvo->planes[planeIndex].offset,
            pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes);
    }
}
5390
/*!
 * Print every (client, device, handle) triple through which pSurfaceEvo is
 * currently reachable.
 *
 * Walks all ioctl clients, then each client's per-open devices on pDevEvo,
 * then each device's surface handles, printing a stanza for every handle
 * that resolves to pSurfaceEvo.
 */
static void PrintSurfaceClients(
    NVEvoInfoStringRec *pInfoString,
    const NVSurfaceEvoRec *pSurfaceEvo,
    const NVDevEvoRec *pDevEvo)
{
    struct NvKmsPerOpen *pOpen;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
        NvKmsGenericHandle deviceHandle;
        struct NvKmsPerOpenDev *pOpenDev;

        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
                                            pOpenDev, deviceHandle) {
            NvKmsGenericHandle surfaceHandle;
            NVSurfaceEvoPtr pTmpSurfaceEvo;

            /* Only consider per-open devices on the surface's device. */
            if (pOpenDev->pDevEvo != pDevEvo) {
                continue;
            }

            FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles,
                                                pTmpSurfaceEvo, surfaceHandle) {
                if (pTmpSurfaceEvo != pSurfaceEvo) {
                    continue;
                }

                nvEvoLogInfoString(pInfoString,
                    " pOpen : %p", pOpen);
                nvEvoLogInfoString(pInfoString,
                    " pOpenDev : %p", pOpenDev);
                nvEvoLogInfoString(pInfoString,
                    " NvKmsSurfaceHandle : %d", surfaceHandle);
            }
        }
    }
}
5427
/*!
 * Print a full procfs stanza for one surface: owner, geometry, memory
 * layout/format, reference counts, per-plane details, the clients that hold
 * handles to it, and any per-subdevice CPU mappings.
 */
static void PrintSurface(
    NVEvoInfoStringRec *pInfoString,
    const NVSurfaceEvoRec *pSurfaceEvo,
    const NVDevEvoRec *pDevEvo)
{
    NvU32 sd;

    nvEvoLogInfoString(pInfoString,
        "pSurfaceEvo : %p", pSurfaceEvo);
    nvEvoLogInfoString(pInfoString,
        " pDevEvo (deviceId:%02d) : %p", pDevEvo->deviceId, pDevEvo);
    nvEvoLogInfoString(pInfoString,
        " owner : "
        "pOpenDev:%p, NvKmsSurfaceHandle:%d",
        pSurfaceEvo->owner.pOpenDev,
        pSurfaceEvo->owner.surfaceHandle);
    nvEvoLogInfoString(pInfoString,
        " {width,height}InPixels : %d x %d",
        pSurfaceEvo->widthInPixels,
        pSurfaceEvo->heightInPixels);
    nvEvoLogInfoString(pInfoString,
        " misc : "
        "log2GobsPerBlockY:%d",
        pSurfaceEvo->log2GobsPerBlockY);
    nvEvoLogInfoString(pInfoString,
        " gpuAddress : 0x%016" NvU64_fmtx,
        pSurfaceEvo->gpuAddress);
    nvEvoLogInfoString(pInfoString,
        " memory : layout:%s format:%s",
        NvKmsSurfaceMemoryLayoutToString(pSurfaceEvo->layout),
        nvKmsSurfaceMemoryFormatToString(pSurfaceEvo->format));
    nvEvoLogInfoString(pInfoString,
        " refCnts : "
        "rmRefCnt:%" NvU64_fmtx" structRefCnt:%" NvU64_fmtx,
        pSurfaceEvo->rmRefCnt,
        pSurfaceEvo->structRefCnt);

    PrintSurfacePlanes(pInfoString, pSurfaceEvo);

    nvEvoLogInfoString(pInfoString,
        " clients :");

    PrintSurfaceClients(pInfoString, pSurfaceEvo, pDevEvo);

    /* Only subdevices with a CPU mapping are printed. */
    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
        if (pSurfaceEvo->cpuAddress[sd] != NULL) {
            nvEvoLogInfoString(pInfoString,
                " cpuAddress[%02d] : %p",
                sd, pSurfaceEvo->cpuAddress[sd]);
        }
    }

    /* Trailing blank line separates stanzas in the procfs output. */
    nvEvoLogInfoString(pInfoString, "");
}
5482
/*!
 * procfs read callback: print every unique surface known to any ioctl
 * client.
 *
 * Surfaces can be reachable from multiple clients (via granting), so a
 * two-pass walk over all clients' surface handles is used: pass 0 prints
 * each surface once and marks it with procFsFlag; pass 1 clears the flags.
 *
 * \param[in]  data       Opaque cookie passed through to outString().
 * \param[in]  buffer     Scratch buffer used to format each stanza.
 * \param[in]  size       Size of buffer, in bytes.
 * \param[in]  outString  Callback used to emit each formatted stanza.
 */
static void
ProcFsPrintSurfaces(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString)
{
    struct NvKmsPerOpen *pOpen;
    NVEvoInfoStringRec infoString;
    NvU32 i;

    for (i = 0; i < 2; i++) {

        nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
            NvKmsGenericHandle deviceHandle;
            struct NvKmsPerOpenDev *pOpenDev;

            FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
                                                pOpenDev, deviceHandle) {

                NvKmsGenericHandle surfaceHandle;
                NVSurfaceEvoPtr pSurfaceEvo;

                FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles,
                                                    pSurfaceEvo,
                                                    surfaceHandle) {
                    /*
                     * Because clients can grant surfaces between each
                     * other, a pSurfaceEvo could be in multiple clients'
                     * lists. So, we loop over all surfaces on all clients
                     * twice: the first time we print unique surfaces and set
                     * 'procFsFlag' to recognize duplicates. The second time,
                     * we clear 'procFsFlag'.
                     */
                    if (i == 0) {
                        if (pSurfaceEvo->procFsFlag) {
                            continue;
                        }

                        nvInitInfoString(&infoString, buffer, size);
                        PrintSurface(&infoString, pSurfaceEvo,
                                     pOpenDev->pDevEvo);
                        outString(data, buffer);

                        pSurfaceEvo->procFsFlag = TRUE;
                    } else {
                        pSurfaceEvo->procFsFlag = FALSE;
                    }
                }
            }
        }
    }
}
5536
/*!
 * procfs read callback: print headsurface state for every device, disp,
 * and API head, delegating the per-head content to nvHsProcFs().
 *
 * \param[in]  data       Opaque cookie passed through to outString().
 * \param[in]  buffer     Scratch buffer used to format each chunk.
 * \param[in]  size       Size of buffer, in bytes.
 * \param[in]  outString  Callback used to emit each formatted chunk.
 */
static void
ProcFsPrintHeadSurface(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString)
{
    NVDevEvoPtr pDevEvo;
    NVDispEvoPtr pDispEvo;
    NvU32 dispIndex, apiHead;
    NVEvoInfoStringRec infoString;

    FOR_ALL_EVO_DEVS(pDevEvo) {

        nvInitInfoString(&infoString, buffer, size);
        nvEvoLogInfoString(&infoString,
            "pDevEvo (deviceId:%02d) : %p",
            pDevEvo->deviceId, pDevEvo);
        outString(data, buffer);

        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {

            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                " pDispEvo (dispIndex:%02d) : %p",
                dispIndex, pDispEvo);
            outString(data, buffer);

            for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
                nvInitInfoString(&infoString, buffer, size);
                nvHsProcFs(&infoString, pDevEvo, dispIndex, apiHead);
                nvEvoLogInfoString(&infoString, "");
                outString(data, buffer);
            }
        }
    }
}
5574
SwapGroupPerEyeStereoString(const NvU32 request)5575 static const char *SwapGroupPerEyeStereoString(const NvU32 request)
5576 {
5577 const NvU32 value =
5578 DRF_VAL(KMS, _DEFERRED_REQUEST,
5579 _SWAP_GROUP_READY_PER_EYE_STEREO, request);
5580
5581 switch (value) {
5582
5583 case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_PAIR:
5584 return "PerPair";
5585 case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_EYE:
5586 return "PerEye";
5587 }
5588
5589 return "Unknown";
5590 }
5591
/*!
 * Print a full procfs stanza for one deferred request FIFO: owning client
 * and device, backing surface, handle, optional swap group state, the
 * put/get pointers, each non-NOP request word, and the semaphore array
 * (with runs of identical values collapsed onto one line).
 */
static void ProcFsPrintOneDeferredRequestFifo(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString,
    const NVDeferredRequestFifoRec *pDeferredRequestFifo,
    const struct NvKmsPerOpen *pOpen,
    const struct NvKmsPerOpenDev *pOpenDev,
    const NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle)
{
    NVEvoInfoStringRec infoString;

    const struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo;
    NvU32 i, prevI;

    nvInitInfoString(&infoString, buffer, size);

    nvEvoLogInfoString(&infoString,
        "pDeferredRequestFifo : %p", pDeferredRequestFifo);

    nvEvoLogInfoString(&infoString,
        " Client (pOpen) : %p", pOpen);

    nvEvoLogInfoString(&infoString,
        " pOpenDev : %p", pOpenDev);

    nvEvoLogInfoString(&infoString,
        " pSurfaceEvo : %p", pDeferredRequestFifo->pSurfaceEvo);

    nvEvoLogInfoString(&infoString,
        " NvKms...RequestFifoHandle : %d", deferredRequestFifoHandle);

    /* Swap group details are only printed for FIFOs joined to one. */
    if (pDeferredRequestFifo->swapGroup.pSwapGroup != NULL) {

        nvEvoLogInfoString(&infoString,
            " swapGroup :");
        nvEvoLogInfoString(&infoString,
            " pSwapGroup : %p",
            pDeferredRequestFifo->swapGroup.pSwapGroup);
        nvEvoLogInfoString(&infoString,
            " pOpenUnicastEvent : %p",
            pDeferredRequestFifo->swapGroup.pOpenUnicastEvent);
        nvEvoLogInfoString(&infoString,
            " ready : %d",
            pDeferredRequestFifo->swapGroup.ready);
        nvEvoLogInfoString(&infoString,
            " semaphoreIndex : 0x%02x",
            pDeferredRequestFifo->swapGroup.semaphoreIndex);
    }

    nvEvoLogInfoString(&infoString,
        " put : %d", fifo->put);

    nvEvoLogInfoString(&infoString,
        " get : %d", fifo->get);

    outString(data, buffer);

    /* Decode and print each request word; NOPs are skipped. */
    for (i = 0; i < ARRAY_LEN(fifo->request); i++) {

        const NvU32 request = fifo->request[i];
        const NvU32 opcode = DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request);
        const NvU32 semaphoreIndex =
            DRF_VAL(KMS, _DEFERRED_REQUEST, _SEMAPHORE_INDEX, request);

        switch (opcode) {

        case NVKMS_DEFERRED_REQUEST_OPCODE_NOP:
            break;

        case NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY:
            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                " request[0x%02x] : "
                "opcode:SWAP_GROUP_READY, semaphoreIndex:0x%02x, "
                "perEyeStereo:%s",
                i, semaphoreIndex,
                SwapGroupPerEyeStereoString(request));
            outString(data, buffer);
            break;

        default:
            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                " request[0x%02x] : opcode:INVALID", i);
            outString(data, buffer);
            break;
        }
    }

    /*
     * Print the fifo->semaphore[] array, but collapse multiple lines with
     * duplicate values.
     *
     * To collapse duplicates, loop over all semaphore[] elements. If the
     * current element is the same as semaphore[prev], continue. If they
     * differ, print the value in semaphore[prev .. i-1], and update prev.
     */
    prevI = 0;

    /* Note: i deliberately runs one past the end to flush the final run. */
    for (i = 1; i <= ARRAY_LEN(fifo->semaphore); i++) {

        const NvU32 prevValue = fifo->semaphore[prevI].data[0];

        if (i != ARRAY_LEN(fifo->semaphore)) {
            const NvU32 currentValue = fifo->semaphore[i].data[0];

            /*
             * If the value in this element matches the previous element, don't
             * print anything, yet.
             */
            if (currentValue == prevValue) {
                continue;
            }
        }

        nvInitInfoString(&infoString, buffer, size);

        if (prevI == (i - 1)) {
            nvEvoLogInfoString(&infoString,
                " semaphore[0x%02x] : 0x%08x",
                prevI, prevValue);
        } else {
            nvEvoLogInfoString(&infoString,
                " semaphore[0x%02x..0x%02x] : 0x%08x",
                prevI, i - 1, prevValue);
        }

        outString(data, buffer);

        prevI = i;
    }

    /* Trailing blank line separates stanzas in the procfs output. */
    nvInitInfoString(&infoString, buffer, size);
    nvEvoLogInfoString(&infoString, "");
    outString(data, buffer);
}
5729
/*!
 * procfs read callback: print every deferred request FIFO owned by any
 * ioctl client, delegating the per-FIFO output to
 * ProcFsPrintOneDeferredRequestFifo().
 *
 * \param[in]  data       Opaque cookie passed through to outString().
 * \param[in]  buffer     Scratch buffer used to format each stanza.
 * \param[in]  size       Size of buffer, in bytes.
 * \param[in]  outString  Callback used to emit each formatted stanza.
 */
static void
ProcFsPrintDeferredRequestFifos(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString)
{
    struct NvKmsPerOpen *pOpen;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {

        struct NvKmsPerOpenDev *pOpenDev;
        NvKmsGenericHandle devHandle;

        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(
            &pOpen->ioctl.devHandles,
            pOpenDev, devHandle) {

            NVDeferredRequestFifoRec *pDeferredRequestFifo;
            NvKmsGenericHandle fifoHandle;

            FOR_ALL_POINTERS_IN_EVO_API_HANDLES(
                &pOpenDev->deferredRequestFifoHandles,
                pDeferredRequestFifo, fifoHandle) {

                ProcFsPrintOneDeferredRequestFifo(
                    data, buffer, size, outString,
                    pDeferredRequestFifo,
                    pOpen,
                    pOpenDev,
                    fifoHandle);
            }
        }
    }
}
5765
/*!
 * procfs read callback: read and print the compositor, raster generator,
 * and output CRC32 values for every active head on every disp of every
 * device. Heads with no connector, i.e. inactive heads, are skipped.
 *
 * \param[in]  data       Opaque cookie passed through to outString().
 * \param[in]  buffer     Scratch buffer used to format each chunk.
 * \param[in]  size       Size of buffer, in bytes.
 * \param[in]  outString  Callback used to emit each formatted chunk.
 */
static void
ProcFsPrintDpyCrcs(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString)
{
    NVDevEvoPtr pDevEvo;
    NVDispEvoPtr pDispEvo;
    NvU32 dispIndex, head;
    NVEvoInfoStringRec infoString;

    FOR_ALL_EVO_DEVS(pDevEvo) {

        nvInitInfoString(&infoString, buffer, size);
        nvEvoLogInfoString(&infoString,
            "pDevEvo (deviceId:%02d) : %p",
            pDevEvo->deviceId, pDevEvo);
        outString(data, buffer);

        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {

            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                " pDispEvo (dispIndex:%02d) : %p",
                dispIndex, pDispEvo);
            outString(data, buffer);

            for (head = 0; head < pDevEvo->numHeads; head++) {
                const NVDispHeadStateEvoRec *pHeadState =
                    &pDispEvo->headState[head];
                struct NvKmsDpyCRC32 compCrc;
                struct NvKmsDpyCRC32 rgCrc;
                struct NvKmsDpyCRC32 outputCrc;
                CRC32NotifierCrcOut crcOut;
                crcOut.compositorCrc32 = &compCrc;
                crcOut.rasterGeneratorCrc32 = &rgCrc;
                crcOut.outputCrc32 = &outputCrc;

                /* No connector means the head is inactive: nothing to read. */
                if (pHeadState->pConnectorEvo == NULL) {
                    continue;
                }

                nvInitInfoString(&infoString, buffer, size);
                if (nvReadCRC32Evo(pDispEvo, head, &crcOut)) {
                    nvEvoLogInfoString(&infoString,
                        " head %d :",
                        head);
                    /* Each CRC source reports its own supported flag. */
                    if (compCrc.supported) {
                        nvEvoLogInfoString(&infoString,
                            " compositor CRC : 0x%08x",
                            compCrc.value);
                    } else {
                        nvEvoLogInfoString(&infoString,
                            " compositor CRC : unsupported");
                    }
                    if (rgCrc.supported) {
                        nvEvoLogInfoString(&infoString,
                            " raster generator CRC : 0x%08x",
                            rgCrc.value);
                    } else {
                        nvEvoLogInfoString(&infoString,
                            " raster generator CRC : unsupported");
                    }
                    if (outputCrc.supported) {
                        nvEvoLogInfoString(&infoString,
                            " output CRC : 0x%08x",
                            outputCrc.value);
                    } else {
                        nvEvoLogInfoString(&infoString,
                            " output CRC : unsupported");
                    }
                } else {
                    nvEvoLogInfoString(&infoString,
                        " head %d : error",
                        head);
                }
                outString(data, buffer);
            }
        }
    }
}
5848
5849 static const char *
SignalFormatString(NvKmsConnectorSignalFormat signalFormat)5850 SignalFormatString(NvKmsConnectorSignalFormat signalFormat)
5851 {
5852 switch (signalFormat) {
5853 case NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA: return "VGA";
5854 case NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS: return "LVDS";
5855 case NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS: return "TMDS";
5856 case NVKMS_CONNECTOR_SIGNAL_FORMAT_DP: return "DP";
5857 case NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI: return "DSI";
5858 case NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN: break;
5859 }
5860
5861 return "unknown";
5862 }
5863
5864 static const char *
PixelDepthString(enum nvKmsPixelDepth pixelDepth)5865 PixelDepthString(enum nvKmsPixelDepth pixelDepth)
5866 {
5867 switch (pixelDepth) {
5868 case NVKMS_PIXEL_DEPTH_18_444: return "18bpp 4:4:4";
5869 case NVKMS_PIXEL_DEPTH_24_444: return "24bpp 4:4:4";
5870 case NVKMS_PIXEL_DEPTH_30_444: return "30bpp 4:4:4";
5871 case NVKMS_PIXEL_DEPTH_20_422: return "20bpp 4:2:2";
5872 case NVKMS_PIXEL_DEPTH_16_422: return "16bpp 4:2:2";
5873 }
5874
5875 return "unknown";
5876 }
5877
/*!
 * procfs read callback: print per-head state for every disp of every
 * device: lock group / fliplock status, then, for each head, the driving
 * connector, signal format, mode, refresh rate, and pixel depth.
 *
 * \param[in]  data       Opaque cookie passed through to outString().
 * \param[in]  buffer     Scratch buffer used to format each chunk.
 * \param[in]  size       Size of buffer, in bytes.
 * \param[in]  outString  Callback used to emit each formatted chunk.
 */
static void
ProcFsPrintHeads(
    void *data,
    char *buffer,
    size_t size,
    nvkms_procfs_out_string_func_t *outString)
{
    NVDevEvoPtr pDevEvo;
    NVDispEvoPtr pDispEvo;
    NvU32 dispIndex, head;
    NVEvoInfoStringRec infoString;

    FOR_ALL_EVO_DEVS(pDevEvo) {

        nvInitInfoString(&infoString, buffer, size);
        nvEvoLogInfoString(&infoString,
            "pDevEvo (deviceId:%02d) : %p",
            pDevEvo->deviceId, pDevEvo);
        outString(data, buffer);

        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
            const NVLockGroup *pLockGroup = pDispEvo->pLockGroup;

            nvInitInfoString(&infoString, buffer, size);
            nvEvoLogInfoString(&infoString,
                " pDispEvo (dispIndex:%02d) : %p",
                dispIndex, pDispEvo);
            /* Lock group info only applies if this disp is in one. */
            if (pLockGroup != NULL) {
                const NvBool flipLocked = nvIsLockGroupFlipLocked(pLockGroup);
                nvEvoLogInfoString(&infoString,
                    " pLockGroup : %p",
                    pLockGroup);
                nvEvoLogInfoString(&infoString,
                    " flipLock : %s",
                    flipLocked ? "yes" : "no");
            }
            outString(data, buffer);

            if (pDevEvo->coreInitMethodsPending) {
                /* If the core channel has been allocated but no mode has yet
                 * been set, pConnectorEvo will be non-NULL for heads being
                 * driven by the console, but data like the mode timings will
                 * be bogus. */
                nvInitInfoString(&infoString, buffer, size);
                nvEvoLogInfoString(&infoString, " (not yet initialized)");
                outString(data, buffer);
                continue;
            }

            for (head = 0; head < pDevEvo->numHeads; head++) {
                const NVDispHeadStateEvoRec *pHeadState =
                    &pDispEvo->headState[head];
                const NVConnectorEvoRec *pConnectorEvo =
                    pHeadState->pConnectorEvo;
                const NVHwModeTimingsEvo *pHwModeTimings =
                    &pHeadState->timings;

                nvInitInfoString(&infoString, buffer, size);
                /* A NULL connector means the head is not driving anything. */
                if (pConnectorEvo == NULL) {
                    nvEvoLogInfoString(&infoString,
                        " head %d : inactive",
                        head);
                } else {
                    const NvU32 refreshRate10kHz =
                        nvGetRefreshRate10kHz(pHwModeTimings);

                    nvEvoLogInfoString(&infoString,
                        " head %d : %s",
                        head, pConnectorEvo->name);

                    nvEvoLogInfoString(&infoString,
                        " protocol : %s",
                        SignalFormatString(pConnectorEvo->signalFormat));

                    /* Refresh rate is stored in units of 1/10000 Hz. */
                    nvEvoLogInfoString(&infoString,
                        " mode : %u x %u @ %u.%04u Hz",
                        nvEvoVisibleWidth(pHwModeTimings),
                        nvEvoVisibleHeight(pHwModeTimings),
                        refreshRate10kHz / 10000,
                        refreshRate10kHz % 10000);

                    nvEvoLogInfoString(&infoString,
                        " depth : %s",
                        PixelDepthString(pHeadState->pixelDepth));
                }
                outString(data, buffer);
            }
        }
    }
}
5968
5969 #endif /* NVKMS_PROCFS_ENABLE */
5970
/*!
 * Return the table of NVKMS procfs files.
 *
 * When procfs support is compiled in, hand back a static, NULL-terminated
 * array of (name, print-callback) pairs; otherwise return NULL.
 *
 * \param[out] ppProcFiles  Receives a pointer to the static table, or NULL.
 */
void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles)
{
#if NVKMS_PROCFS_ENABLE
    static const nvkms_procfs_file_t procFiles[] = {
        { "clients", ProcFsPrintClients },
        { "surfaces", ProcFsPrintSurfaces },
        { "headsurface", ProcFsPrintHeadSurface },
        { "deferred-request-fifos", ProcFsPrintDeferredRequestFifos },
        { "crcs", ProcFsPrintDpyCrcs },
        { "heads", ProcFsPrintHeads },
        { NULL, NULL },
    };

    *ppProcFiles = procFiles;
#else
    *ppProcFiles = NULL;
#endif
}
5989
/*!
 * Free NVKMS global state: tear down the internal per-open, release the
 * RM client handle, and clear dpy overrides.
 *
 * Called from nvKmsModuleUnload() and from the nvKmsModuleLoad() failure
 * path; each step tolerates the corresponding resource never having been
 * allocated.
 */
static void FreeGlobalState(void)
{
    nvInvalidateRasterLockGroupsEvo();

    nvKmsClose(nvEvoGlobal.nvKmsPerOpen);
    nvEvoGlobal.nvKmsPerOpen = NULL;

    if (nvEvoGlobal.clientHandle != 0) {
        /* The root client is freed with itself as client, parent, and
         * object handle. */
        nvRmApiFree(nvEvoGlobal.clientHandle, nvEvoGlobal.clientHandle,
                    nvEvoGlobal.clientHandle);
        nvEvoGlobal.clientHandle = 0;
    }

    nvClearDpyOverrides();
}
6005
nvKmsModuleLoad(void)6006 NvBool nvKmsModuleLoad(void)
6007 {
6008 NvU32 ret = NVOS_STATUS_ERROR_GENERIC;
6009
6010 nvEvoLog(EVO_LOG_INFO, "Loading %s", pNV_KMS_ID);
6011
6012 ret = nvRmApiAlloc(NV01_NULL_OBJECT,
6013 NV01_NULL_OBJECT,
6014 NV01_NULL_OBJECT,
6015 NV01_ROOT,
6016 &nvEvoGlobal.clientHandle);
6017
6018 if (ret != NVOS_STATUS_SUCCESS) {
6019 nvEvoLog(EVO_LOG_ERROR, "Failed to initialize client");
6020 goto fail;
6021 }
6022
6023 nvEvoGlobal.nvKmsPerOpen = nvKmsOpen(0, NVKMS_CLIENT_KERNEL_SPACE, NULL);
6024 if (!nvEvoGlobal.nvKmsPerOpen) {
6025 nvEvoLog(EVO_LOG_ERROR, "Failed to initialize internal modeset client");
6026 goto fail;
6027 }
6028
6029 if (!AssignNvKmsPerOpenType(nvEvoGlobal.nvKmsPerOpen,
6030 NvKmsPerOpenTypeIoctl, FALSE)) {
6031 goto fail;
6032 }
6033
6034 return TRUE;
6035 fail:
6036 FreeGlobalState();
6037
6038 return FALSE;
6039 }
6040
6041
/*!
 * Module unload callback: free global state and assert that all devices
 * and framelock objects have already been torn down.
 */
void nvKmsModuleUnload(void)
{
    FreeGlobalState();

    nvAssert(nvListIsEmpty(&nvEvoGlobal.frameLockList));
    nvAssert(nvListIsEmpty(&nvEvoGlobal.devList));
#if defined(DEBUG)
    /* Debug builds report any allocations that were never freed. */
    nvReportUnfreedAllocations();
#endif
    nvEvoLog(EVO_LOG_INFO, "Unloading");
}
6053
6054
SendEvent(struct NvKmsPerOpen * pOpen,const struct NvKmsEvent * pEvent)6055 static void SendEvent(struct NvKmsPerOpen *pOpen,
6056 const struct NvKmsEvent *pEvent)
6057 {
6058 struct NvKmsPerOpenEventListEntry *pEntry = nvAlloc(sizeof(*pEntry));
6059
6060 nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl);
6061
6062 if (pEntry == NULL) {
6063 return;
6064 }
6065
6066 pEntry->event = *pEvent;
6067 nvListAppend(&pEntry->eventListEntry, &pOpen->ioctl.eventList);
6068
6069 nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE);
6070 }
6071
/*
 * Timer callback: restore the console on the device, but only if no client
 * currently owns modesets and console hotplug handling is enabled.
 */
static void ConsoleRestoreTimerFired(void *dataPtr, NvU32 dataU32)
{
    NVDevEvoPtr pDevEvo = dataPtr;

    if ((pDevEvo->modesetOwner != NULL) || !pDevEvo->handleConsoleHotplugs) {
        return;
    }

    pDevEvo->skipConsoleRestore = FALSE;
    nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */);
}
6081
6082 /*!
6083 * Generate a dpy event.
6084 *
6085 * \param[in] pDpyEvo The dpy for which the event should be generated.
6086 * \param[in] eventType The NVKMS_EVENT_TYPE_
6087 * \param[in] attribute The NvKmsDpyAttribute; only used for
6088 * NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED.
6089 * \param[in] NvS64 The NvKmsDpyAttribute value; only used for
6090 * NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED.
6091 */
SendDpyEventEvo(const NVDpyEvoRec * pDpyEvo,const NvU32 eventType,const enum NvKmsDpyAttribute attribute,const NvS64 value)6092 static void SendDpyEventEvo(const NVDpyEvoRec *pDpyEvo,
6093 const NvU32 eventType,
6094 const enum NvKmsDpyAttribute attribute,
6095 const NvS64 value)
6096 {
6097 struct NvKmsPerOpen *pOpen;
6098 const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
6099
6100 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
6101
6102 struct NvKmsEvent event = { 0 };
6103 NvKmsDeviceHandle deviceHandle;
6104 NvKmsDispHandle dispHandle;
6105
6106 if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo,
6107 &deviceHandle, &dispHandle)) {
6108 continue;
6109 }
6110
6111 if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) {
6112 continue;
6113 }
6114
6115 event.eventType = eventType;
6116
6117 switch (eventType) {
6118
6119 case NVKMS_EVENT_TYPE_DPY_CHANGED:
6120 event.u.dpyChanged.deviceHandle = deviceHandle;
6121 event.u.dpyChanged.dispHandle = dispHandle;
6122 event.u.dpyChanged.dpyId = pDpyEvo->id;
6123 break;
6124
6125 case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED:
6126 event.u.dynamicDpyConnected.deviceHandle = deviceHandle;
6127 event.u.dynamicDpyConnected.dispHandle = dispHandle;
6128 event.u.dynamicDpyConnected.dpyId = pDpyEvo->id;
6129 break;
6130
6131 case NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED:
6132 event.u.dynamicDpyDisconnected.deviceHandle = deviceHandle;
6133 event.u.dynamicDpyDisconnected.dispHandle = dispHandle;
6134 event.u.dynamicDpyDisconnected.dpyId = pDpyEvo->id;
6135 break;
6136
6137 case NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED:
6138 event.u.dpyAttributeChanged.deviceHandle = deviceHandle;
6139 event.u.dpyAttributeChanged.dispHandle = dispHandle;
6140 event.u.dpyAttributeChanged.dpyId = pDpyEvo->id;
6141 event.u.dpyAttributeChanged.attribute = attribute;
6142 event.u.dpyAttributeChanged.value = value;
6143 break;
6144
6145 default:
6146 nvAssert(!"Bad eventType");
6147 return;
6148 }
6149
6150 SendEvent(pOpen, &event);
6151 }
6152
6153 if (eventType == NVKMS_EVENT_TYPE_DPY_CHANGED) {
6154 NVDevEvoPtr pDevEvo = pDpyEvo->pDispEvo->pDevEvo;
6155
6156 if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) {
6157 nvkms_free_timer(pDevEvo->consoleRestoreTimer);
6158 pDevEvo->consoleRestoreTimer =
6159 nvkms_alloc_timer(ConsoleRestoreTimerFired, pDevEvo, 0, 500);
6160 }
6161 }
6162 }
6163
/*!
 * Generate a dpy event that carries no attribute/value payload.
 */
void nvSendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, const NvU32 eventType)
{
    /* Attribute-changed events must go through
     * nvSendDpyAttributeChangedEventEvo(), which supplies the payload. */
    nvAssert(eventType != NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED);
    SendDpyEventEvo(pDpyEvo, eventType,
                    0 /* attribute (unused) */,
                    0 /* value (unused) */ );
}
6171
/*!
 * Generate a DPY_ATTRIBUTE_CHANGED event reporting the new value of
 * 'attribute' on 'pDpyEvo'.
 */
void nvSendDpyAttributeChangedEventEvo(const NVDpyEvoRec *pDpyEvo,
                                       const enum NvKmsDpyAttribute attribute,
                                       const NvS64 value)
{
    SendDpyEventEvo(pDpyEvo,
                    NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED,
                    attribute, value);
}
6180
nvSendFrameLockAttributeChangedEventEvo(const NVFrameLockEvoRec * pFrameLockEvo,const enum NvKmsFrameLockAttribute attribute,const NvS64 value)6181 void nvSendFrameLockAttributeChangedEventEvo(
6182 const NVFrameLockEvoRec *pFrameLockEvo,
6183 const enum NvKmsFrameLockAttribute attribute,
6184 const NvS64 value)
6185 {
6186 struct NvKmsPerOpen *pOpen;
6187 const NvU32 eventType = NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED;
6188
6189 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
6190
6191 struct NvKmsEvent event = { 0 };
6192 NvKmsFrameLockHandle frameLockHandle;
6193
6194 if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) {
6195 continue;
6196 }
6197
6198 if (!FrameLockEvoToFrameLockHandle(pOpen, pFrameLockEvo,
6199 &frameLockHandle)) {
6200 continue;
6201 }
6202
6203 event.eventType = eventType;
6204 event.u.frameLockAttributeChanged.frameLockHandle = frameLockHandle;
6205 event.u.frameLockAttributeChanged.attribute = attribute;
6206 event.u.frameLockAttributeChanged.value = value;
6207
6208 SendEvent(pOpen, &event);
6209 }
6210 }
6211
6212
nvSendFlipOccurredEventEvo(const NVDispEvoRec * pDispEvo,const NvU32 apiHead,const NvU32 layer)6213 void nvSendFlipOccurredEventEvo(const NVDispEvoRec *pDispEvo,
6214 const NvU32 apiHead, const NvU32 layer)
6215 {
6216 struct NvKmsPerOpen *pOpen;
6217 const NvU32 eventType = NVKMS_EVENT_TYPE_FLIP_OCCURRED;
6218
6219 nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
6220
6221 struct NvKmsEvent event = { 0 };
6222 NvKmsDeviceHandle deviceHandle;
6223 NvKmsDispHandle dispHandle;
6224
6225 struct NvKmsPerOpenDev *pOpenDev;
6226 const struct NvKmsFlipPermissions *pFlipPermissions;
6227
6228 pOpenDev = DevEvoToOpenDev(pOpen, pDispEvo->pDevEvo);
6229
6230 if (pOpenDev == NULL) {
6231 continue;
6232 }
6233
6234 if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) {
6235 continue;
6236 }
6237
6238 pFlipPermissions = &pOpenDev->flipPermissions;
6239
6240 if ((pFlipPermissions->disp[pDispEvo->displayOwner].
6241 head[apiHead].layerMask & NVBIT(layer)) == 0x0) {
6242 continue;
6243 }
6244
6245 if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo,
6246 &deviceHandle, &dispHandle)) {
6247 continue;
6248 }
6249
6250 event.eventType = eventType;
6251 event.u.flipOccurred.deviceHandle = deviceHandle;
6252 event.u.flipOccurred.dispHandle = dispHandle;
6253 event.u.flipOccurred.head = apiHead;
6254 event.u.flipOccurred.layer = layer;
6255
6256 SendEvent(pOpen, &event);
6257 }
6258 }
6259
/*!
 * Signal the unicast event fd of 'pOpen'.
 *
 * A NULL pOpen is a no-op; otherwise pOpen must be a unicast-event open
 * with a defined unicast event type.
 */
void nvSendUnicastEvent(struct NvKmsPerOpen *pOpen)
{
    if (pOpen == NULL) {
        return;
    }

    nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent);
    nvAssert(pOpen->unicastEvent.type != NvKmsUnicastEventTypeUndefined);

    nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE);
}
6271
nvRemoveUnicastEvent(struct NvKmsPerOpen * pOpen)6272 void nvRemoveUnicastEvent(struct NvKmsPerOpen *pOpen)
6273 {
6274 NVDeferredRequestFifoPtr pDeferredRequestFifo;
6275 NvKmsGenericHandle callbackHandle;
6276 NVVBlankCallbackPtr pCallbackData;
6277 struct NvKmsPerOpenDisp *pOpenDisp;
6278 NvU32 apiHead;
6279
6280 if (pOpen == NULL) {
6281 return;
6282 }
6283
6284 nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent);
6285
6286 switch(pOpen->unicastEvent.type)
6287 {
6288 case NvKmsUnicastEventTypeDeferredRequest:
6289 pDeferredRequestFifo =
6290 pOpen->unicastEvent.e.deferred.pDeferredRequestFifo;
6291
6292 pDeferredRequestFifo->swapGroup.pOpenUnicastEvent = NULL;
6293 pOpen->unicastEvent.e.deferred.pDeferredRequestFifo = NULL;
6294 break;
6295 case NvKmsUnicastEventTypeVblankNotification:
6296 /* grab fields from the unicast fd */
6297 callbackHandle =
6298 pOpen->unicastEvent.e.vblankNotification.hCallback;
6299 pOpenDisp =
6300 pOpen->unicastEvent.e.vblankNotification.pOpenDisp;
6301 apiHead = pOpen->unicastEvent.e.vblankNotification.apiHead;
6302
6303 /* Unregister the vblank callback */
6304 pCallbackData =
6305 nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead],
6306 callbackHandle);
6307
6308 nvApiHeadUnregisterVBlankCallback(pOpenDisp->pDispEvo,
6309 pCallbackData);
6310
6311 nvEvoDestroyApiHandle(&pOpenDisp->vblankCallbackHandles[apiHead],
6312 callbackHandle);
6313
6314 /* invalidate the pOpen data */
6315 pOpen->unicastEvent.e.vblankNotification.hCallback = 0;
6316 pOpen->unicastEvent.e.vblankNotification.pOpenDisp = NULL;
6317 pOpen->unicastEvent.e.vblankNotification.apiHead = NV_INVALID_HEAD;
6318 break;
6319 default:
6320 nvAssert("Invalid Unicast Event Type!");
6321 break;
6322 }
6323
6324 pOpen->unicastEvent.type = NvKmsUnicastEventTypeUndefined;
6325 }
6326
/*!
 * Re-allocate and bind surface descriptors for every client-owned surface
 * on 'pDevEvo' that requires display hardware access.
 *
 * Called on resume (see nvKmsResume()), undoing what
 * FreeSurfaceCtxDmasForAllOpens() did on suspend.  If any plane fails to
 * allocate, everything allocated so far is freed and the function bails.
 */
static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo)
{
    struct NvKmsPerOpen *pOpen;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
        struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);

        NvKmsGenericHandle surfaceHandle;
        NVSurfaceEvoPtr pSurfaceEvo;

        /* This client has no handles for pDevEvo. */
        if (pOpenDev == NULL) {
            continue;
        }

        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles,
                                            pSurfaceEvo, surfaceHandle) {

            NvU8 planeIndex;

            /* Only the owning open (re)allocates; skip mere references. */
            if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) {
                continue;
            }

            /* Surfaces without display hardware access have no ctxdma. */
            if (!pSurfaceEvo->requireDisplayHardwareAccess) {
                nvAssert(pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle == 0);
                continue;
            }

            /*
             * Orphan surfaces should not get this far: they should
             * fail the owner check above.
             */
            nvAssert(pSurfaceEvo->rmRefCnt > 0);

            FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) {
                NvU32 ret =
                    nvRmAllocAndBindSurfaceDescriptor(
                            pDevEvo,
                            pSurfaceEvo->planes[planeIndex].rmHandle,
                            pSurfaceEvo->layout,
                            pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes - 1,
                            &pSurfaceEvo->planes[planeIndex].surfaceDesc);
                if (ret != NVOS_STATUS_SUCCESS) {
                    /* Roll back all descriptors allocated so far. */
                    FreeSurfaceCtxDmasForAllOpens(pDevEvo);
                    nvAssert(!"Failed to re-allocate surface descriptor");
                    return;
                }
            }
        }
    }
}
6378
6379
/*!
 * Free the surface descriptors of every client-owned surface on 'pDevEvo'
 * that requires display hardware access.
 *
 * Called on suspend (see nvKmsSuspend()); counterpart to
 * AllocSurfaceCtxDmasForAllOpens().
 */
static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo)
{
    struct NvKmsPerOpen *pOpen;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {
        struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);

        NvKmsGenericHandle surfaceHandle;
        NVSurfaceEvoPtr pSurfaceEvo;

        /* This client has no handles for pDevEvo. */
        if (pOpenDev == NULL) {
            continue;
        }

        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles,
                                            pSurfaceEvo, surfaceHandle) {

            NvU8 planeIndex;

            /* Only the owning open frees; skip mere references. */
            if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) {
                continue;
            }

            /*
             * Orphan surfaces should not get this far: they should
             * fail the owner check above.
             */
            nvAssert(pSurfaceEvo->rmRefCnt > 0);

            /* Surfaces without display hardware access have no ctxdma. */
            if (!pSurfaceEvo->requireDisplayHardwareAccess) {
                nvAssert(pSurfaceEvo->planes[0].surfaceDesc.ctxDmaHandle == 0);
                continue;
            }

            FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) {
                pDevEvo->hal->FreeSurfaceDescriptor(
                            pDevEvo,
                            nvEvoGlobal.clientHandle,
                            &pSurfaceEvo->planes[planeIndex].surfaceDesc);
            }
        }
    }
}
6423
6424 #if defined(DEBUG)
nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec * pSurfaceEvo)6425 NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo)
6426 {
6427 struct NvKmsPerOpen *pOpen;
6428
6429 nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) {
6430
6431 if (pOpen->type == NvKmsPerOpenTypeIoctl) {
6432 struct NvKmsPerOpenDev *pOpenDev;
6433 NvKmsGenericHandle dev;
6434
6435 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles,
6436 pOpenDev, dev) {
6437
6438 NvKmsGenericHandle surfaceHandleUnused;
6439 NVSurfaceEvoPtr pSurfaceEvoTmp;
6440
6441 if (pOpenDev == NULL) {
6442 continue;
6443 }
6444
6445 FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles,
6446 pSurfaceEvoTmp,
6447 surfaceHandleUnused) {
6448 if (pSurfaceEvoTmp == pSurfaceEvo) {
6449 return TRUE;
6450 }
6451 }
6452 }
6453 } else if ((pOpen->type == NvKmsPerOpenTypeGrantSurface) &&
6454 (pOpen->grantSurface.pSurfaceEvo == pSurfaceEvo)) {
6455 return TRUE;
6456 }
6457 }
6458
6459 return FALSE;
6460 }
6461 #endif
6462
/*!
 * Return the flip permissions of 'pOpenDev'; pOpenDev must be non-NULL.
 */
const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev(
    const struct NvKmsPerOpenDev *pOpenDev)
{
    nvAssert(pOpenDev != NULL);
    return &pOpenDev->flipPermissions;
}
6469
/*!
 * Return the modeset permissions of 'pOpenDev'; pOpenDev must be non-NULL.
 */
const struct NvKmsModesetPermissions *nvGetModesetPermissionsFromOpenDev(
    const struct NvKmsPerOpenDev *pOpenDev)
{
    nvAssert(pOpenDev != NULL);
    return &pOpenDev->modesetPermissions;
}
6476
nvGetSurfaceHandlesFromOpenDev(struct NvKmsPerOpenDev * pOpenDev)6477 NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev(
6478 struct NvKmsPerOpenDev *pOpenDev)
6479 {
6480 if (pOpenDev == NULL) {
6481 return NULL;
6482 }
6483
6484 return &pOpenDev->surfaceHandles;
6485 }
6486
nvGetSurfaceHandlesFromOpenDevConst(const struct NvKmsPerOpenDev * pOpenDev)6487 const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst(
6488 const struct NvKmsPerOpenDev *pOpenDev)
6489 {
6490 if (pOpenDev == NULL) {
6491 return NULL;
6492 }
6493
6494 return &pOpenDev->surfaceHandles;
6495 }
6496
/* Nested suspend depth: incremented once per nvKmsSuspend() call and
 * decremented per nvKmsResume() call. */
static int suspendCounter = 0;

/*
 * Suspend NVKMS.
 *
 * This function is called by RM once per GPU, but NVKMS just counts the number
 * of suspend calls so that it can deallocate the core channels on the first
 * call to suspend(), and reallocate them on the last call to resume().
 */
void nvKmsSuspend(NvU32 gpuId)
{
    if (suspendCounter == 0) {
        NVDevEvoPtr pDevEvo;

        FOR_ALL_EVO_DEVS(pDevEvo) {
            nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Suspending");

            /*
             * Shut down all heads and skip console restore.
             *
             * This works around an RM bug where it fails to train DisplayPort
             * links during resume if the system was suspended while heads were
             * active.
             *
             * XXX TODO bug 1850734: In addition to fixing the above
             * RM bug, NVKMS should clear pDispEvo head and connector state
             * that becomes stale after suspend. Shutting the heads down here
             * clears the relevant state explicitly.
             */
            nvShutDownApiHeads(pDevEvo, pDevEvo->pNvKmsOpenDev,
                               NULL /* pTestFunc, shut down all heads */,
                               NULL /* pData */,
                               TRUE /* doRasterLock */);
            pDevEvo->skipConsoleRestore = TRUE;

            DisableAndCleanVblankSyncObjectForAllOpens(pDevEvo);

            /* Surface descriptors are re-created on resume. */
            FreeSurfaceCtxDmasForAllOpens(pDevEvo);

            nvSuspendDevEvo(pDevEvo);
        }
    }

    suspendCounter++;
}
6542
/*!
 * Resume NVKMS.
 *
 * Counterpart to nvKmsSuspend(): per-device state is reallocated only on
 * the last resume call, when the suspend counter drops back to zero.
 */
void nvKmsResume(NvU32 gpuId)
{
    suspendCounter--;

    if (suspendCounter == 0) {
        NVDevEvoPtr pDevEvo;

        FOR_ALL_EVO_DEVS(pDevEvo) {
            nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Resuming");

            if (nvResumeDevEvo(pDevEvo)) {
                nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
                EnableAndSetupVblankSyncObjectForAllOpens(pDevEvo);
                AllocSurfaceCtxDmasForAllOpens(pDevEvo);
            }

            if (pDevEvo->modesetOwner == NULL) {
                // Hardware state was lost, so we need to force a console
                // restore.
                pDevEvo->skipConsoleRestore = FALSE;
                RestoreConsole(pDevEvo);
            }
        }
    }
}
6568
/*!
 * Process all pending requests in one client's deferred request fifo.
 *
 * The fifo memory is written by the client, so 'get' and 'put' are
 * range-checked before being used as indices; out-of-range values cause
 * the fifo to be ignored.  Once all entries between get and put have been
 * handled, 'get' is published back so the client can reuse the entries.
 */
static void ServiceOneDeferredRequestFifo(
    NVDevEvoPtr pDevEvo,
    NVDeferredRequestFifoRec *pDeferredRequestFifo)
{
    struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo;
    NvU32 get, put;

    nvAssert(fifo != NULL);

    get = fifo->get;
    put = fifo->put;

    /* Fifo empty: nothing to do. */
    if (put == get) {
        return;
    }

    /* Malformed (client-provided) indices: ignore the fifo entirely. */
    if ((get >= ARRAY_LEN(fifo->request)) ||
        (put >= ARRAY_LEN(fifo->request))) {
        return;
    }

    while (get != put) {

        const NvU32 request = fifo->request[get];
        const NvU32 opcode =
            DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request);

        switch (opcode) {

        case NVKMS_DEFERRED_REQUEST_OPCODE_NOP:
            break;

        case NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY:
            nvHsSwapGroupReady(
                pDevEvo,
                pDeferredRequestFifo,
                request);
            break;

        default:
            nvAssert(!"Invalid NVKMS deferred request opcode");
            break;
        }

        /* Advance with wrap-around. */
        get = (get + 1) % ARRAY_LEN(fifo->request);
    }

    /* Publish that everything up to 'put' has been consumed. */
    fifo->get = put;
}
6618
6619 /*!
6620 * Respond to a non-stall interrupt.
6621 */
/*!
 * Non-stall interrupt handler: drain the deferred request fifos of every
 * ioctl client on this device, then let headsurface process any pending
 * viewport flips.
 */
void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32)
{
    NVDevEvoPtr pDevEvo = dataPtr;
    struct NvKmsPerOpen *pOpen;

    nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) {

        struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo);
        NVDeferredRequestFifoRec *pFifo;
        NvKmsGenericHandle fifoHandle;

        /* This client has no handles for pDevEvo. */
        if (pOpenDev == NULL) {
            continue;
        }

        FOR_ALL_POINTERS_IN_EVO_API_HANDLES(
            &pOpenDev->deferredRequestFifoHandles,
            pFifo,
            fifoHandle) {

            ServiceOneDeferredRequestFifo(pDevEvo, pFifo);
        }
    }

    nvHsProcessPendingViewportFlips(pDevEvo);
}
6648
nvKmsGetBacklight(NvU32 display_id,void * drv_priv,NvU32 * brightness)6649 NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness)
6650 {
6651 NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 };
6652 NV_STATUS status = NV_ERR_INVALID_STATE;
6653 NVDispEvoRec *pDispEvo = drv_priv;
6654 NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
6655
6656 params.subDeviceInstance = pDispEvo->displayOwner;
6657 params.displayId = display_id;
6658 params.brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100;
6659
6660 status = nvRmApiControl(nvEvoGlobal.clientHandle,
6661 pDevEvo->displayCommonHandle,
6662 NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
6663 ¶ms, sizeof(params));
6664
6665 if (status == NV_OK) {
6666 *brightness = params.brightness;
6667 }
6668
6669 return status == NV_OK;
6670 }
6671
nvKmsSetBacklight(NvU32 display_id,void * drv_priv,NvU32 brightness)6672 NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness)
6673 {
6674 NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 };
6675 NV_STATUS status = NV_ERR_INVALID_STATE;
6676 NVDispEvoRec *pDispEvo = drv_priv;
6677 NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
6678
6679 params.subDeviceInstance = pDispEvo->displayOwner;
6680 params.displayId = display_id;
6681 params.brightness = brightness;
6682 params.brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100;
6683
6684 status = nvRmApiControl(nvEvoGlobal.clientHandle,
6685 pDevEvo->displayCommonHandle,
6686 NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
6687 ¶ms, sizeof(params));
6688
6689 return status == NV_OK;
6690 }
6691
nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev * pOpenDev)6692 NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *pOpenDev)
6693 {
6694 return pOpenDev->isPrivileged ||
6695 pOpenDev->pDevEvo->modesetOwner == pOpenDev ||
6696 pOpenDev->pDevEvo->modesetSubOwner == pOpenDev;
6697 }
6698