/*
 * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE and NV_DRM_DRM_GEM_H_PRESENT */

#include "nvidia-drm-priv.h"
#include "nvidia-drm-drv.h"
#include "nvidia-drm-fb.h"
#include "nvidia-drm-modeset.h"
#include "nvidia-drm-encoder.h"
#include "nvidia-drm-connector.h"
#include "nvidia-drm-gem.h"
#include "nvidia-drm-crtc.h"
#include "nvidia-drm-fence.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-drm-gem-dma-buf.h"

#if defined(NV_DRM_AVAILABLE)

#include "nvidia-drm-ioctl.h"

#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif

#if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT)
#include <drm/drm_atomic_uapi.h>
#endif

#if defined(NV_DRM_DRM_VBLANK_H_PRESENT)
#include <drm/drm_vblank.h>
#endif

#if defined(NV_DRM_DRM_FILE_H_PRESENT)
#include <drm/drm_file.h>
#endif

#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif

#if defined(NV_DRM_DRM_IOCTL_H_PRESENT)
#include <drm/drm_ioctl.h>
#endif

#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
#include <drm/drm_aperture.h>
#include <drm/drm_fb_helper.h>
#endif

#if defined(NV_DRM_DRM_FBDEV_GENERIC_H_PRESENT)
#include <drm/drm_fbdev_generic.h>
#endif

#include <linux/pci.h>
#include <linux/workqueue.h>

/*
 * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
 * moves a number of helper function definitions from
 * drm/drm_crtc_helper.h to a new drm_probe_helper.h.
 */
#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
#include <drm/drm_probe_helper.h>
#endif
#include <drm/drm_crtc_helper.h>

#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
#endif

#if defined(NV_DRM_DRM_AUTH_H_PRESENT)
#include <drm/drm_auth.h>
#endif

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include <drm/drm_atomic_helper.h>
#endif

static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
                                            struct drm_file *filep,
                                            NvU32 dpyId);
static int nv_drm_revoke_sub_ownership(struct drm_device *dev);

static struct nv_drm_device *dev_list = NULL;

static const char* nv_get_input_colorspace_name(
    enum NvKmsInputColorSpace colorSpace)
{
    switch (colorSpace) {
        case NVKMS_INPUT_COLORSPACE_NONE:
            return "None";
        case NVKMS_INPUT_COLORSPACE_SCRGB_LINEAR:
            return "IEC 61966-2-2 linear FP";
        case NVKMS_INPUT_COLORSPACE_BT2100_PQ:
            return "ITU-R BT.2100-PQ YCbCr";
        default:
            /* We shouldn't hit this */
            WARN_ON("Unsupported input colorspace");
            return "None";
    }
}

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

static void nv_drm_output_poll_changed(struct drm_device *dev)
{
    struct drm_connector *connector = NULL;
    struct drm_mode_config *config = &dev->mode_config;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    struct drm_connector_list_iter conn_iter;
    nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
    /*
     * Here drm_mode_config::mutex has been acquired unconditionally:
     *
     * - In the non-NV_DRM_CONNECTOR_LIST_ITER_PRESENT case, the mutex must
     *   be held for the duration of walking over the connectors.
     *
     * - In the NV_DRM_CONNECTOR_LIST_ITER_PRESENT case, the mutex must be
     *   held for the duration of a fill_modes() call chain:
     *     connector->funcs->fill_modes()
     *     |-> drm_helper_probe_single_connector_modes()
     *
     * It is easiest to always acquire the mutex for the entire connector
     * loop.
     */
    mutex_lock(&config->mutex);

    nv_drm_for_each_connector(connector, &conn_iter, dev) {

        struct nv_drm_connector *nv_connector = to_nv_connector(connector);

        if (!nv_drm_connector_check_connection_status_dirty_and_clear(
                nv_connector)) {
            continue;
        }

        connector->funcs->fill_modes(
            connector,
            dev->mode_config.max_width, dev->mode_config.max_height);
    }

    mutex_unlock(&config->mutex);
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_end(&conn_iter);
#endif
}

static struct drm_framebuffer *nv_drm_framebuffer_create(
    struct drm_device *dev,
    struct drm_file *file,
#if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG)
    const struct drm_mode_fb_cmd2 *cmd
#else
    struct drm_mode_fb_cmd2 *cmd
#endif
)
{
    struct drm_mode_fb_cmd2 local_cmd;
    struct drm_framebuffer *fb;

    local_cmd = *cmd;

    fb = nv_drm_internal_framebuffer_create(
        dev,
        file,
        &local_cmd);

#if !defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG)
    *cmd = local_cmd;
#endif

    return fb;
}

static const struct drm_mode_config_funcs nv_mode_config_funcs = {
    .fb_create = nv_drm_framebuffer_create,

    .atomic_state_alloc = nv_drm_atomic_state_alloc,
    .atomic_state_clear = nv_drm_atomic_state_clear,
    .atomic_state_free = nv_drm_atomic_state_free,
    .atomic_check = nv_drm_atomic_check,
    .atomic_commit = nv_drm_atomic_commit,

    .output_poll_changed = nv_drm_output_poll_changed,
};

static void nv_drm_event_callback(const struct NvKmsKapiEvent *event)
{
    struct nv_drm_device *nv_dev = event->privateData;

    mutex_lock(&nv_dev->lock);

    if (!atomic_read(&nv_dev->enable_event_handling)) {
        goto done;
    }

    switch (event->type) {
        case NVKMS_EVENT_TYPE_DPY_CHANGED:
            nv_drm_handle_display_change(
                nv_dev,
                event->u.displayChanged.display);
            break;

        case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED:
            nv_drm_handle_dynamic_display_connected(
                nv_dev,
                event->u.dynamicDisplayConnected.display);
            break;
        case NVKMS_EVENT_TYPE_FLIP_OCCURRED:
            nv_drm_handle_flip_occurred(
                nv_dev,
                event->u.flipOccurred.head,
                event->u.flipOccurred.layer);
            break;
        default:
            break;
    }

done:

    mutex_unlock(&nv_dev->lock);
}

/*
 * Helper function to initialize drm_device::mode_config from
 * NvKmsKapiDevice's resource information.
 */
static void
nv_drm_init_mode_config(struct nv_drm_device *nv_dev,
                        const struct NvKmsKapiDeviceResourcesInfo *pResInfo)
{
    struct drm_device *dev = nv_dev->dev;

    drm_mode_config_init(dev);
    drm_mode_create_dvi_i_properties(dev);

    dev->mode_config.funcs = &nv_mode_config_funcs;

    dev->mode_config.min_width = pResInfo->caps.minWidthInPixels;
    dev->mode_config.min_height = pResInfo->caps.minHeightInPixels;

    dev->mode_config.max_width = pResInfo->caps.maxWidthInPixels;
    dev->mode_config.max_height = pResInfo->caps.maxHeightInPixels;

    dev->mode_config.cursor_width = pResInfo->caps.maxCursorSizeInPixels;
    dev->mode_config.cursor_height = pResInfo->caps.maxCursorSizeInPixels;

    /*
     * NVIDIA GPUs have no preferred depth. Arbitrarily report 24, to be
     * consistent with other DRM drivers.
     */

    dev->mode_config.preferred_depth = 24;
    dev->mode_config.prefer_shadow = 1;

#if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP) || \
    defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS)
    dev->mode_config.async_page_flip = true;
#else
    dev->mode_config.async_page_flip = false;
#endif

#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) && \
    defined(NV_DRM_MODE_CONFIG_HAS_ALLOW_FB_MODIFIERS)
    /* Allow clients to define framebuffer layouts using DRM format modifiers */
    dev->mode_config.allow_fb_modifiers = true;
#endif

    /* Initialize output polling support */

    drm_kms_helper_poll_init(dev);

    /* Disable output polling, because we don't support it yet */

    drm_kms_helper_poll_disable(dev);
}

/*
 * Helper function to enumerate encoders/connectors from NvKmsKapiDevice.
 */
static void nv_drm_enumerate_encoders_and_connectors
(
    struct nv_drm_device *nv_dev
)
{
    struct drm_device *dev = nv_dev->dev;
    NvU32 nDisplays = 0;

    if (!nvKms->getDisplays(nv_dev->pDevice, &nDisplays, NULL)) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to enumerate NvKmsKapiDisplay count");
    }

    if (nDisplays != 0) {
        NvKmsKapiDisplay *hDisplays =
            nv_drm_calloc(nDisplays, sizeof(*hDisplays));

        if (hDisplays != NULL) {
            if (!nvKms->getDisplays(nv_dev->pDevice, &nDisplays, hDisplays)) {
                NV_DRM_DEV_LOG_ERR(
                    nv_dev,
                    "Failed to enumerate NvKmsKapiDisplay handles");
            } else {
                NvU32 i;

                for (i = 0; i < nDisplays; i++) {
                    struct drm_encoder *encoder =
                        nv_drm_add_encoder(dev, hDisplays[i]);

                    if (IS_ERR(encoder)) {
                        NV_DRM_DEV_LOG_ERR(
                            nv_dev,
                            "Failed to add connector for NvKmsKapiDisplay 0x%08x",
                            hDisplays[i]);
                    }
                }
            }

            nv_drm_free(hDisplays);
        } else {
            NV_DRM_DEV_LOG_ERR(
                nv_dev,
                "Failed to allocate memory for NvKmsKapiDisplay array");
        }
    }
}

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

/*!
 * 'NV_DRM_OUT_FENCE_PTR' is an atomic per-plane property that clients can use
 * to request an out-fence fd for a particular plane that's being flipped.
 * 'NV_DRM_OUT_FENCE_PTR' does NOT have the same behavior as the standard
 * 'OUT_FENCE_PTR' property - the fd that's returned via 'NV_DRM_OUT_FENCE_PTR'
 * will only be signaled once the buffers in the corresponding flip are flipped
 * away from.
 * To use this property, the client sets it with a user-mode pointer as the
 * value. Once the driver has the post-syncpt fd from the flip reply, it
 * copies that fd to the location the user-mode pointer names.
 */
static int nv_drm_create_properties(struct nv_drm_device *nv_dev)
{
    struct drm_prop_enum_list enum_list[3] = { };
    int i, len = 0;

    for (i = 0; i < 3; i++) {
        enum_list[len].type = i;
        enum_list[len].name = nv_get_input_colorspace_name(i);
        len++;
    }

    if (nv_dev->supportsSyncpts) {
        nv_dev->nv_out_fence_property =
            drm_property_create_range(nv_dev->dev, DRM_MODE_PROP_ATOMIC,
                                      "NV_DRM_OUT_FENCE_PTR", 0, U64_MAX);
        if (nv_dev->nv_out_fence_property == NULL) {
            return -ENOMEM;
        }
    }

    nv_dev->nv_input_colorspace_property =
        drm_property_create_enum(nv_dev->dev, 0, "NV_INPUT_COLORSPACE",
                                 enum_list, len);
    if (nv_dev->nv_input_colorspace_property == NULL) {
        NV_DRM_LOG_ERR("Failed to create NV_INPUT_COLORSPACE property");
        return -ENOMEM;
    }

#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    nv_dev->nv_hdr_output_metadata_property =
        drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB,
                            "NV_HDR_STATIC_METADATA", 0);
    if (nv_dev->nv_hdr_output_metadata_property == NULL) {
        return -ENOMEM;
    }
#endif

    return 0;
}
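
/*
 * A minimal userspace sketch (not part of this driver) of flipping with an
 * out-fence via the property created above. It assumes libdrm, a plane owned
 * by this driver, and that out_fence_prop_id was discovered beforehand with
 * drmModeObjectGetProperties(); error handling is elided.
 *
 *     #include <stdint.h>
 *     #include <xf86drmMode.h>
 *
 *     int32_t post_syncpt_fd = -1;
 *     drmModeAtomicReq *req = drmModeAtomicAlloc();
 *
 *     // NV_DRM_OUT_FENCE_PTR takes the user-mode pointer itself as the
 *     // property value; the driver later writes the post-syncpt fd there.
 *     drmModeAtomicAddProperty(req, plane_id, out_fence_prop_id,
 *                              (uint64_t)(uintptr_t)&post_syncpt_fd);
 *     // ... add FB_ID, CRTC_ID, etc., then:
 *     drmModeAtomicCommit(fd, req, 0, NULL);
 *     drmModeAtomicFree(req);
 *     // Per the comment above, post_syncpt_fd signals when the committed
 *     // buffers are flipped *away from*, not when they reach the screen.
 */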

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
/*
 * We can't just call drm_kms_helper_hotplug_event directly because
 * fbdev_generic may attempt to set a mode from inside the hotplug event
 * handler. Because kapi event handling runs on nvkms_kthread_q, this blocks
 * other event processing including the flip completion notifier expected by
 * nv_drm_atomic_commit.
 *
 * Defer hotplug event handling to a work item so that nvkms_kthread_q can
 * continue processing events while a DRM modeset is in progress.
 */
static void nv_drm_handle_hotplug_event(struct work_struct *work)
{
    struct delayed_work *dwork = to_delayed_work(work);
    struct nv_drm_device *nv_dev =
        container_of(dwork, struct nv_drm_device, hotplug_event_work);

    drm_kms_helper_hotplug_event(nv_dev->dev);
}
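
/*
 * For context, a sketch of the producer side: the display-change handlers
 * called from nv_drm_event_callback() (defined elsewhere in this driver)
 * would queue this work roughly as
 *
 *     schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
 *
 * so that drm_kms_helper_hotplug_event() runs on a kernel worker thread
 * instead of on nvkms_kthread_q, keeping flip-completion notifications
 * flowing during any fbdev modeset triggered by the hotplug.
 */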
#endif

static int nv_drm_load(struct drm_device *dev, unsigned long flags)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    struct NvKmsKapiDevice *pDevice;

    struct NvKmsKapiAllocateDeviceParams allocateDeviceParams;
    struct NvKmsKapiDeviceResourcesInfo resInfo;
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
    NvU64 kind;
    NvU64 gen;
    int i;
#endif
    int ret;

    struct nv_drm_device *nv_dev = to_nv_device(dev);

    NV_DRM_DEV_LOG_INFO(nv_dev, "Loading driver");

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return 0;
    }

    /* Allocate NvKmsKapiDevice from GPU ID */

    memset(&allocateDeviceParams, 0, sizeof(allocateDeviceParams));

    allocateDeviceParams.gpuId = nv_dev->gpu_info.gpu_id;

    allocateDeviceParams.privateData = nv_dev;
    allocateDeviceParams.eventCallback = nv_drm_event_callback;

    pDevice = nvKms->allocateDevice(&allocateDeviceParams);

    if (pDevice == NULL) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to allocate NvKmsKapiDevice");
        return -ENODEV;
    }

    /* Query information of resources available on device */

    if (!nvKms->getDeviceResourcesInfo(pDevice, &resInfo)) {

        nvKms->freeDevice(pDevice);

        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to query NvKmsKapiDevice resources info");
        return -ENODEV;
    }

#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
    /*
     * If fbdev is enabled, take modeset ownership now before other DRM clients
     * can take master (and thus NVKMS ownership).
     */
    if (nv_drm_fbdev_module_param) {
        if (!nvKms->grabOwnership(pDevice)) {
            nvKms->freeDevice(pDevice);
            NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to grab NVKMS modeset ownership");
            return -EBUSY;
        }

        nv_dev->hasFramebufferConsole = NV_TRUE;
    }
#endif

    mutex_lock(&nv_dev->lock);

    /* Set NvKmsKapiDevice */

    nv_dev->pDevice = pDevice;

    nv_dev->pitchAlignment = resInfo.caps.pitchAlignment;

    nv_dev->hasVideoMemory = resInfo.caps.hasVideoMemory;

    nv_dev->genericPageKind = resInfo.caps.genericPageKind;

    // Fermi through Volta use generation 0; Turing and later use generation 2.
    nv_dev->pageKindGeneration = (nv_dev->genericPageKind == 0x06) ? 2 : 0;

    // Desktop GPUs and mobile GPUs Xavier and later use the same sector layout
    nv_dev->sectorLayout = 1;

    nv_dev->supportsSyncpts = resInfo.caps.supportsSyncpts;

    nv_dev->semsurf_stride = resInfo.caps.semsurf.stride;

    nv_dev->semsurf_max_submitted_offset =
        resInfo.caps.semsurf.maxSubmittedOffset;

    nv_dev->display_semaphores.count =
        resInfo.caps.numDisplaySemaphores;
    nv_dev->display_semaphores.next_index = 0;

    nv_dev->requiresVrrSemaphores = resInfo.caps.requiresVrrSemaphores;

#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
    gen = nv_dev->pageKindGeneration;
    kind = nv_dev->genericPageKind;

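    /*
     * The entries built below all describe the same block-linear layout and
     * differ only in the final log2(block height) field: i == 0 encodes
     * 2^5 = 32 GOBs per block, down through i == 5 encoding 2^0 = 1 GOB,
     * after which plain linear and the invalid terminator are appended.
     * Clients choose among these via the format-modifier support enabled in
     * nv_drm_init_mode_config() above.
     */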
    for (i = 0; i <= 5; i++) {
        nv_dev->modifiers[i] =
            /*    Log2(block height) ----------------------------------+  *
             *    Page Kind ------------------------------------+      |  *
             *    Gob Height/Page Kind Generation --------+     |      |  *
             *    Sector layout -------------------+      |     |      |  *
             *    Compression --------------+      |      |     |      |  *
             *                              |      |      |     |      |  */
            DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0,     1,    gen,  kind, 5 - i);
    }

    nv_dev->modifiers[i++] = DRM_FORMAT_MOD_LINEAR;
    nv_dev->modifiers[i++] = DRM_FORMAT_MOD_INVALID;
#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */

    /* Initialize drm_device::mode_config */

    nv_drm_init_mode_config(nv_dev, &resInfo);

    ret = nv_drm_create_properties(nv_dev);
    if (ret < 0) {
        return -ENODEV;
    }

    if (!nvKms->declareEventInterest(
            nv_dev->pDevice,
            ((1 << NVKMS_EVENT_TYPE_DPY_CHANGED) |
             (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) |
             (1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED)))) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to register event mask");
    }

    /* Add crtcs */

    nv_drm_enumerate_crtcs_and_planes(nv_dev, &resInfo);

    /* Add connectors and encoders */

    nv_drm_enumerate_encoders_and_connectors(nv_dev);

#if !defined(NV_DRM_CRTC_STATE_HAS_NO_VBLANK)
    drm_vblank_init(dev, dev->mode_config.num_crtc);
#endif

    /*
     * Trigger hot-plug processing, to update connection status of
     * all HPD supported connectors.
     */

    drm_helper_hpd_irq_event(dev);

    /* Enable event handling */

    INIT_DELAYED_WORK(&nv_dev->hotplug_event_work, nv_drm_handle_hotplug_event);
    atomic_set(&nv_dev->enable_event_handling, true);

    init_waitqueue_head(&nv_dev->flip_event_wq);

    mutex_unlock(&nv_dev->lock);

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

    return 0;
}

static void __nv_drm_unload(struct drm_device *dev)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    struct NvKmsKapiDevice *pDevice = NULL;
#endif

    struct nv_drm_device *nv_dev = to_nv_device(dev);

    NV_DRM_DEV_LOG_INFO(nv_dev, "Unloading driver");

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return;
    }

    /* Release modeset ownership if fbdev is enabled */

#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
    if (nv_dev->hasFramebufferConsole) {
        drm_atomic_helper_shutdown(dev);
        nvKms->releaseOwnership(nv_dev->pDevice);
    }
#endif

    cancel_delayed_work_sync(&nv_dev->hotplug_event_work);
    mutex_lock(&nv_dev->lock);

    WARN_ON(nv_dev->subOwnershipGranted);

    /* Disable event handling */

    atomic_set(&nv_dev->enable_event_handling, false);

    /* Clean up output polling */

    drm_kms_helper_poll_fini(dev);

    /* Clean up mode configuration */

    drm_mode_config_cleanup(dev);

    if (!nvKms->declareEventInterest(nv_dev->pDevice, 0x0)) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to stop event listening");
    }

    /* Unset NvKmsKapiDevice */

    pDevice = nv_dev->pDevice;
    nv_dev->pDevice = NULL;

    mutex_unlock(&nv_dev->lock);

    nvKms->freeDevice(pDevice);

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
}

#if defined(NV_DRM_DRIVER_UNLOAD_HAS_INT_RETURN_TYPE)
static int nv_drm_unload(struct drm_device *dev)
{
    __nv_drm_unload(dev);

    return 0;
}
#else
static void nv_drm_unload(struct drm_device *dev)
{
    __nv_drm_unload(dev);
}
#endif

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

static int __nv_drm_master_set(struct drm_device *dev,
                               struct drm_file *file_priv, bool from_open)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);

    /*
     * If this device is driving a framebuffer, then nvidia-drm already has
     * modeset ownership. Otherwise, grab ownership now.
     */
    if (!nv_dev->hasFramebufferConsole &&
        !nvKms->grabOwnership(nv_dev->pDevice)) {
        return -EINVAL;
    }

    return 0;
}

#if defined(NV_DRM_DRIVER_SET_MASTER_HAS_INT_RETURN_TYPE)
static int nv_drm_master_set(struct drm_device *dev,
                             struct drm_file *file_priv, bool from_open)
{
    return __nv_drm_master_set(dev, file_priv, from_open);
}
#else
static void nv_drm_master_set(struct drm_device *dev,
                              struct drm_file *file_priv, bool from_open)
{
    if (__nv_drm_master_set(dev, file_priv, from_open) != 0) {
        NV_DRM_DEV_LOG_ERR(to_nv_device(dev), "Failed to grab modeset ownership");
    }
}
#endif


#if defined(NV_DRM_MASTER_DROP_HAS_FROM_RELEASE_ARG)
static
void nv_drm_master_drop(struct drm_device *dev,
                        struct drm_file *file_priv, bool from_release)
#else
static
void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
#endif
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    int err;

    nv_drm_revoke_modeset_permission(dev, file_priv, 0);
    nv_drm_revoke_sub_ownership(dev);

    /*
     * After dropping nvkms modeset ownership, it is not guaranteed that drm
     * and nvkms modeset state will remain in sync. Therefore, disable all
     * outputs and crtcs before dropping nvkms modeset ownership.
     *
     * First disable all active outputs atomically, then disable each crtc
     * one by one; there is no helper function available to disable all
     * crtcs atomically.
     */

    drm_modeset_lock_all(dev);

    if ((err = nv_drm_atomic_helper_disable_all(
            dev,
            dev->mode_config.acquire_ctx)) != 0) {

        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "nv_drm_atomic_helper_disable_all failed with error code %d !",
            err);
    }

    drm_modeset_unlock_all(dev);

    if (!nv_dev->hasFramebufferConsole) {
        nvKms->releaseOwnership(nv_dev->pDevice);
    }
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

#if defined(NV_DRM_BUS_PRESENT) || defined(NV_DRM_DRIVER_HAS_SET_BUSID)
static int nv_drm_pci_set_busid(struct drm_device *dev,
                                struct drm_master *master)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);

    master->unique = nv_drm_asprintf("pci:%04x:%02x:%02x.%d",
                                     nv_dev->gpu_info.pci_info.domain,
                                     nv_dev->gpu_info.pci_info.bus,
                                     nv_dev->gpu_info.pci_info.slot,
                                     nv_dev->gpu_info.pci_info.function);

    if (master->unique == NULL) {
        return -ENOMEM;
    }

    master->unique_len = strlen(master->unique);

    return 0;
}
#endif

static int nv_drm_get_dev_info_ioctl(struct drm_device *dev,
                                     void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_get_dev_info_params *params = data;

    if (dev->primary == NULL) {
        return -ENOENT;
    }

    params->gpu_id = nv_dev->gpu_info.gpu_id;
    params->primary_index = dev->primary->index;
    params->supports_alloc = false;
    params->generic_page_kind = 0;
    params->page_kind_generation = 0;
    params->sector_layout = 0;
    params->supports_sync_fd = false;
    params->supports_semsurf = false;

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    /* Memory allocation and semaphore surfaces are only supported
     * if the modeset = 1 parameter is set */
    if (nv_dev->pDevice != NULL) {
        params->supports_alloc = true;
        params->generic_page_kind = nv_dev->genericPageKind;
        params->page_kind_generation = nv_dev->pageKindGeneration;
        params->sector_layout = nv_dev->sectorLayout;

        if (nv_dev->semsurf_stride != 0) {
            params->supports_semsurf = true;
#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
            params->supports_sync_fd = true;
#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
        }
    }
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */

    return 0;
}
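
/*
 * A hedged userspace sketch of consuming this ioctl; the request macro name
 * and the parameter struct are assumed to come from nvidia-drm-ioctl.h, and
 * fd is assumed to be open on an nvidia-drm device node:
 *
 *     struct drm_nvidia_get_dev_info_params info = { 0 };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_NVIDIA_GET_DEV_INFO, &info) == 0 &&
 *         info.supports_alloc) {
 *         // generic_page_kind / page_kind_generation / sector_layout
 *         // parameterize the block-linear format modifiers built in
 *         // nv_drm_load() above.
 *     }
 */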

static int nv_drm_get_drm_file_unique_id_ioctl(struct drm_device *dev,
                                               void *data, struct drm_file *filep)
{
    struct drm_nvidia_get_drm_file_unique_id_params *params = data;
    params->id = (u64)(filep->driver_priv);
    return 0;
}

static int nv_drm_dmabuf_supported_ioctl(struct drm_device *dev,
                                         void *data, struct drm_file *filep)
{
    /* check the pDevice since this only gets set if modeset = 1
     * which is a requirement for the dma_buf extension to work
     */
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    return nv_dev->pDevice ? 0 : -EINVAL;
}

static
int nv_drm_get_client_capability_ioctl(struct drm_device *dev,
                                       void *data, struct drm_file *filep)
{
    struct drm_nvidia_get_client_capability_params *params = data;

    switch (params->capability) {
#if defined(DRM_CLIENT_CAP_STEREO_3D)
        case DRM_CLIENT_CAP_STEREO_3D:
            params->value = filep->stereo_allowed;
            break;
#endif
#if defined(DRM_CLIENT_CAP_UNIVERSAL_PLANES)
        case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
            params->value = filep->universal_planes;
            break;
#endif
#if defined(DRM_CLIENT_CAP_ATOMIC)
        case DRM_CLIENT_CAP_ATOMIC:
            params->value = filep->atomic;
            break;
#endif
        default:
            return -EINVAL;
    }

    return 0;
}

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
static bool nv_drm_connector_is_dpy_id(struct drm_connector *connector,
                                       NvU32 dpyId)
{
    struct nv_drm_connector *nv_connector = to_nv_connector(connector);
    return nv_connector->nv_detected_encoder &&
           nv_connector->nv_detected_encoder->hDisplay == dpyId;
}

static int nv_drm_get_dpy_id_for_connector_id_ioctl(struct drm_device *dev,
                                                    void *data,
                                                    struct drm_file *filep)
{
    struct drm_nvidia_get_dpy_id_for_connector_id_params *params = data;
    struct drm_connector *connector;
    struct nv_drm_connector *nv_connector;
    int ret = 0;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return -EOPNOTSUPP;
    }

    // Importantly, drm_connector_lookup (with filep) will only return the
    // connector if we are master, a lessee with the connector, or not master at
    // all. It will return NULL if we are a lessee with other connectors.
    connector = nv_drm_connector_lookup(dev, filep, params->connectorId);

    if (!connector) {
        return -EINVAL;
    }

    nv_connector = to_nv_connector(connector);
    if (!nv_connector) {
        ret = -EINVAL;
        goto done;
    }

    if (!nv_connector->nv_detected_encoder) {
        ret = -EINVAL;
        goto done;
    }

    params->dpyId = nv_connector->nv_detected_encoder->hDisplay;

done:
    nv_drm_connector_put(connector);
    return ret;
}

static int nv_drm_get_connector_id_for_dpy_id_ioctl(struct drm_device *dev,
                                                    void *data,
                                                    struct drm_file *filep)
{
    struct drm_nvidia_get_connector_id_for_dpy_id_params *params = data;
    struct drm_connector *connector;
    int ret = -EINVAL;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    struct drm_connector_list_iter conn_iter;
#endif
    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return -EOPNOTSUPP;
    }
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif

    /* Look for an existing connector with the same dpyId */
    nv_drm_for_each_connector(connector, &conn_iter, dev) {
        if (nv_drm_connector_is_dpy_id(connector, params->dpyId)) {
            params->connectorId = connector->base.id;
            ret = 0;
            break;
        }
    }
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_end(&conn_iter);
#endif

    return ret;
}

static NvU32 nv_drm_get_head_bit_from_connector(struct drm_connector *connector)
{
    struct nv_drm_connector *nv_connector = to_nv_connector(connector);

    if (connector->state && connector->state->crtc) {
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(connector->state->crtc);
        return NVBIT(nv_crtc->head);
    } else if (nv_connector->nv_detected_encoder &&
               nv_connector->nv_detected_encoder->base.crtc) {
        struct nv_drm_crtc *nv_crtc =
            to_nv_crtc(nv_connector->nv_detected_encoder->base.crtc);
        return NVBIT(nv_crtc->head);
    }

    return 0;
}

static int nv_drm_grant_modeset_permission(struct drm_device *dev,
                                           struct drm_nvidia_grant_permissions_params *params,
                                           struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct nv_drm_connector *target_nv_connector = NULL;
    struct nv_drm_crtc *target_nv_crtc = NULL;
    struct drm_connector *connector, *target_connector = NULL;
    struct drm_crtc *crtc;
    NvU32 head = 0, freeHeadBits, targetHeadBit, possible_crtcs;
    int ret = 0;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    struct drm_connector_list_iter conn_iter;
#endif
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
    struct drm_modeset_acquire_ctx ctx;
    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
                               ret);
#else
    mutex_lock(&dev->mode_config.mutex);
#endif

    /* Get the connector for the dpyId. */
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
    nv_drm_for_each_connector(connector, &conn_iter, dev) {
        if (nv_drm_connector_is_dpy_id(connector, params->dpyId)) {
            target_connector =
                nv_drm_connector_lookup(dev, filep, connector->base.id);
            break;
        }
    }
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_end(&conn_iter);
#endif

    // Importantly, drm_connector_lookup/drm_crtc_find (with filep) will only
    // return the object if we are master, a lessee with the object, or not
    // master at all. It will return NULL if we are a lessee with other objects.
    if (!target_connector) {
        ret = -EINVAL;
        goto done;
    }
    target_nv_connector = to_nv_connector(target_connector);
    possible_crtcs =
        target_nv_connector->nv_detected_encoder->base.possible_crtcs;

    /* Target connector must not be previously granted. */
    if (target_nv_connector->modeset_permission_filep) {
        ret = -EINVAL;
        goto done;
    }

    /* Add all heads that are owned and not already granted. */
    freeHeadBits = 0;
    nv_drm_for_each_crtc(crtc, dev) {
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
        if (nv_drm_crtc_find(dev, filep, crtc->base.id) &&
            !nv_crtc->modeset_permission_filep &&
            (drm_crtc_mask(crtc) & possible_crtcs)) {
            freeHeadBits |= NVBIT(nv_crtc->head);
        }
    }

    targetHeadBit = nv_drm_get_head_bit_from_connector(target_connector);
    if (targetHeadBit & freeHeadBits) {
        /* If a crtc is already being used by this connector, use it. */
        freeHeadBits = targetHeadBit;
    } else {
        /* Otherwise, remove heads that are in use by other connectors. */
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
        nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
        nv_drm_for_each_connector(connector, &conn_iter, dev) {
            freeHeadBits &= ~nv_drm_get_head_bit_from_connector(connector);
        }
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
        nv_drm_connector_list_iter_end(&conn_iter);
#endif
    }

    /* Fail if no heads are available. */
    if (!freeHeadBits) {
        ret = -EINVAL;
        goto done;
    }

    /*
     * Loop through the crtc again and find a matching head.
     * Record the filep that is using the crtc and the connector.
     */
    nv_drm_for_each_crtc(crtc, dev) {
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
        if (freeHeadBits & NVBIT(nv_crtc->head)) {
            target_nv_crtc = nv_crtc;
            head = nv_crtc->head;
            break;
        }
    }

    if (!nvKms->grantPermissions(params->fd, nv_dev->pDevice, head,
                                 params->dpyId)) {
        ret = -EINVAL;
        goto done;
    }

    target_nv_connector->modeset_permission_crtc = target_nv_crtc;
    target_nv_connector->modeset_permission_filep = filep;
    target_nv_crtc->modeset_permission_filep = filep;

done:
    if (target_connector) {
        nv_drm_connector_put(target_connector);
    }

#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
    DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
    mutex_unlock(&dev->mode_config.mutex);
#endif

    return ret;
}

static int nv_drm_grant_sub_ownership(struct drm_device *dev,
                                      struct drm_nvidia_grant_permissions_params *params)
{
    int ret = -EINVAL;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_modeset_acquire_ctx *pctx;
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
    struct drm_modeset_acquire_ctx ctx;
    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
                               ret);
    pctx = &ctx;
#else
    mutex_lock(&dev->mode_config.mutex);
    pctx = dev->mode_config.acquire_ctx;
#endif

    if (nv_dev->subOwnershipGranted ||
        !nvKms->grantSubOwnership(params->fd, nv_dev->pDevice)) {
        goto done;
    }

    /*
     * When creating an ownership grant, shut down all heads and disable flip
     * notifications.
     */
    ret = nv_drm_atomic_helper_disable_all(dev, pctx);
    if (ret != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "nv_drm_atomic_helper_disable_all failed with error code %d!",
            ret);
    }

    atomic_set(&nv_dev->enable_event_handling, false);
    nv_dev->subOwnershipGranted = NV_TRUE;

    ret = 0;

done:
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
    DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
    mutex_unlock(&dev->mode_config.mutex);
#endif
    return ret;
}

static int nv_drm_grant_permission_ioctl(struct drm_device *dev, void *data,
                                         struct drm_file *filep)
{
    struct drm_nvidia_grant_permissions_params *params = data;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return -EOPNOTSUPP;
    }

    if (params->type == NV_DRM_PERMISSIONS_TYPE_MODESET) {
        return nv_drm_grant_modeset_permission(dev, params, filep);
    } else if (params->type == NV_DRM_PERMISSIONS_TYPE_SUB_OWNER) {
        return nv_drm_grant_sub_ownership(dev, params);
    }

    return -EINVAL;
}

static int
nv_drm_atomic_disable_connector(struct drm_atomic_state *state,
                                struct nv_drm_connector *nv_connector)
{
    struct drm_crtc_state *crtc_state;
    struct drm_connector_state *connector_state;
    int ret = 0;

    if (nv_connector->modeset_permission_crtc) {
        crtc_state = drm_atomic_get_crtc_state(
            state, &nv_connector->modeset_permission_crtc->base);
        if (!crtc_state) {
            return -EINVAL;
        }

        crtc_state->active = false;
        ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
        if (ret < 0) {
            return ret;
        }
    }

    connector_state = drm_atomic_get_connector_state(state, &nv_connector->base);
    if (!connector_state) {
        return -EINVAL;
    }

    return drm_atomic_set_crtc_for_connector(connector_state, NULL);
}

static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
                                            struct drm_file *filep, NvU32 dpyId)
{
    struct drm_modeset_acquire_ctx *pctx;
    struct drm_atomic_state *state;
    struct drm_connector *connector;
    struct drm_crtc *crtc;
    int ret = 0;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    struct drm_connector_list_iter conn_iter;
#endif
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
    struct drm_modeset_acquire_ctx ctx;
    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
                               ret);
    pctx = &ctx;
#else
    mutex_lock(&dev->mode_config.mutex);
    pctx = dev->mode_config.acquire_ctx;
#endif

    state = drm_atomic_state_alloc(dev);
    if (!state) {
        ret = -ENOMEM;
        goto done;
    }
    state->acquire_ctx = pctx;

    /*
     * If dpyId is set, only revoke those specific resources. Otherwise,
     * it is from closing the file so revoke all resources for that filep.
     */
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
    nv_drm_for_each_connector(connector, &conn_iter, dev) {
        struct nv_drm_connector *nv_connector = to_nv_connector(connector);
        if (nv_connector->modeset_permission_filep == filep &&
            (!dpyId || nv_drm_connector_is_dpy_id(connector, dpyId))) {
            ret = nv_drm_atomic_disable_connector(state, nv_connector);
            if (ret < 0) {
                goto done;
            }

            // Continue trying to revoke as much as possible.
            nv_drm_connector_revoke_permissions(dev, nv_connector);
        }
    }
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_end(&conn_iter);
#endif

    nv_drm_for_each_crtc(crtc, dev) {
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
        if (nv_crtc->modeset_permission_filep == filep && !dpyId) {
            nv_crtc->modeset_permission_filep = NULL;
        }
    }

    ret = drm_atomic_commit(state);
done:
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
    drm_atomic_state_put(state);
#else
    if (ret != 0) {
        drm_atomic_state_free(state);
    } else {
        /*
         * In case of success, drm_atomic_commit() takes care to cleanup and
         * free @state.
         *
         * Comment placed above drm_atomic_commit() says: The caller must not
         * free or in any other way access @state. If the function fails then
         * the caller must clean up @state itself.
         */
    }
#endif

#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
    DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
    mutex_unlock(&dev->mode_config.mutex);
#endif

    return ret;
}

static int nv_drm_revoke_sub_ownership(struct drm_device *dev)
{
    int ret = -EINVAL;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
    struct drm_modeset_acquire_ctx ctx;
    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
                               ret);
#else
    mutex_lock(&dev->mode_config.mutex);
#endif

    if (!nv_dev->subOwnershipGranted) {
        goto done;
    }

    if (!nvKms->revokeSubOwnership(nv_dev->pDevice)) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to revoke sub-ownership from NVKMS");
        goto done;
    }

    nv_dev->subOwnershipGranted = NV_FALSE;
    atomic_set(&nv_dev->enable_event_handling, true);
    ret = 0;

done:
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
    DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
    mutex_unlock(&dev->mode_config.mutex);
#endif
    return ret;
}

static int nv_drm_revoke_permission_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *filep)
{
    struct drm_nvidia_revoke_permissions_params *params = data;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return -EOPNOTSUPP;
    }

    if (params->type == NV_DRM_PERMISSIONS_TYPE_MODESET) {
        if (!params->dpyId) {
            return -EINVAL;
        }
        return nv_drm_revoke_modeset_permission(dev, filep, params->dpyId);
    } else if (params->type == NV_DRM_PERMISSIONS_TYPE_SUB_OWNER) {
        return nv_drm_revoke_sub_ownership(dev);
    }

    return -EINVAL;
}

static void nv_drm_postclose(struct drm_device *dev, struct drm_file *filep)
{
    /*
     * Some systems, like Android, can reach here without initializing the
     * device, so check for that.
     */
    if (dev->mode_config.num_crtc > 0 &&
        dev->mode_config.crtc_list.next != NULL &&
        dev->mode_config.crtc_list.prev != NULL &&
        dev->mode_config.num_connector > 0 &&
        dev->mode_config.connector_list.next != NULL &&
        dev->mode_config.connector_list.prev != NULL) {
        nv_drm_revoke_modeset_permission(dev, filep, 0);
    }
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

static int nv_drm_open(struct drm_device *dev, struct drm_file *filep)
{
    _Static_assert(sizeof(filep->driver_priv) >= sizeof(u64),
                   "filep->driver_priv cannot hold a u64");
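
    /*
     * Hand every open file a unique, monotonically increasing id. It is
     * reported back to clients by nv_drm_get_drm_file_unique_id_ioctl().
     */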
    static atomic64_t id = ATOMIC_INIT(0);

    filep->driver_priv = (void *)atomic64_inc_return(&id);

    return 0;
}

#if defined(NV_DRM_MASTER_HAS_LEASES)
static struct drm_master *nv_drm_find_lessee(struct drm_master *master,
                                             int lessee_id)
{
    int object;
    void *entry;

    while (master->lessor != NULL) {
        master = master->lessor;
    }

    idr_for_each_entry(&master->lessee_idr, entry, object)
    {
        if (object == lessee_id) {
            return entry;
        }
    }

    return NULL;
}

static void nv_drm_get_revoked_objects(struct drm_device *dev,
                                       struct drm_file *filep, unsigned int cmd,
                                       unsigned long arg, int **objects,
                                       int *objects_count)
{
    unsigned int ioc_size;
    struct drm_mode_revoke_lease revoke_lease;
    struct drm_master *lessor, *lessee;
    void *entry;
    int *objs;
    int obj, obj_count, obj_i;

    ioc_size = _IOC_SIZE(cmd);
    if (ioc_size > sizeof(revoke_lease)) {
        return;
    }

    if (copy_from_user(&revoke_lease, (void __user *)arg, ioc_size) != 0) {
        return;
    }

    lessor = nv_drm_file_get_master(filep);
    if (lessor == NULL) {
        return;
    }

    mutex_lock(&dev->mode_config.idr_mutex);
    lessee = nv_drm_find_lessee(lessor, revoke_lease.lessee_id);

    if (lessee == NULL) {
        goto done;
    }

    obj_count = 0;
    idr_for_each_entry(&lessee->leases, entry, obj) {
        ++obj_count;
    }
    if (obj_count == 0) {
        goto done;
    }

    objs = nv_drm_calloc(obj_count, sizeof(int));
    if (objs == NULL) {
        goto done;
    }

    obj_i = 0;
    idr_for_each_entry(&lessee->leases, entry, obj) {
        objs[obj_i++] = obj;
    }
    *objects = objs;
    *objects_count = obj_count;

done:
    mutex_unlock(&dev->mode_config.idr_mutex);
    drm_master_put(&lessor);
}

static bool nv_drm_is_in_objects(int object, int *objects, int objects_count)
{
    int i;
    for (i = 0; i < objects_count; ++i) {
        if (objects[i] == object) {
            return true;
        }
    }
    return false;
}

static void nv_drm_finish_revoking_objects(struct drm_device *dev,
                                           struct drm_file *filep, int *objects,
                                           int objects_count)
{
    struct drm_connector *connector;
    struct drm_crtc *crtc;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    struct drm_connector_list_iter conn_iter;
#endif
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
    int ret = 0;
    struct drm_modeset_acquire_ctx ctx;
    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
                               ret);
#else
    mutex_lock(&dev->mode_config.mutex);
#endif

#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
    nv_drm_for_each_connector(connector, &conn_iter, dev) {
        struct nv_drm_connector *nv_connector = to_nv_connector(connector);
        if (nv_connector->modeset_permission_filep &&
            nv_drm_is_in_objects(connector->base.id, objects, objects_count)) {
            nv_drm_connector_revoke_permissions(dev, nv_connector);
        }
    }
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_end(&conn_iter);
#endif

    nv_drm_for_each_crtc(crtc, dev) {
        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
        if (nv_crtc->modeset_permission_filep &&
            nv_drm_is_in_objects(crtc->base.id, objects, objects_count)) {
            nv_crtc->modeset_permission_filep = NULL;
        }
    }

#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
    DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
    mutex_unlock(&dev->mode_config.mutex);
#endif
}
#endif /* NV_DRM_MASTER_HAS_LEASES */

#if defined(NV_DRM_BUS_PRESENT)

#if defined(NV_DRM_BUS_HAS_GET_IRQ)
static int nv_drm_bus_get_irq(struct drm_device *dev)
{
    return 0;
}
#endif

#if defined(NV_DRM_BUS_HAS_GET_NAME)
static const char *nv_drm_bus_get_name(struct drm_device *dev)
{
    return "nvidia-drm";
}
#endif

static struct drm_bus nv_drm_bus = {
#if defined(NV_DRM_BUS_HAS_BUS_TYPE)
    .bus_type = DRIVER_BUS_PCI,
#endif
#if defined(NV_DRM_BUS_HAS_GET_IRQ)
    .get_irq = nv_drm_bus_get_irq,
#endif
#if defined(NV_DRM_BUS_HAS_GET_NAME)
    .get_name = nv_drm_bus_get_name,
#endif
    .set_busid = nv_drm_pci_set_busid,
};

#endif /* NV_DRM_BUS_PRESENT */

/*
 * Wrapper around drm_ioctl to hook in to upstream ioctl.
 *
 * Currently used to add additional handling to REVOKE_LEASE.
 */
static long nv_drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    long retcode;

#if defined(NV_DRM_MASTER_HAS_LEASES)
    struct drm_file *file_priv = filp->private_data;
    struct drm_device *dev = file_priv->minor->dev;
    int *objects = NULL;
    int objects_count = 0;

    if (cmd == DRM_IOCTL_MODE_REVOKE_LEASE) {
        // Save the revoked objects before revoking.
        nv_drm_get_revoked_objects(dev, file_priv, cmd, arg, &objects,
                                   &objects_count);
    }
#endif

    retcode = drm_ioctl(filp, cmd, arg);

#if defined(NV_DRM_MASTER_HAS_LEASES)
    if (cmd == DRM_IOCTL_MODE_REVOKE_LEASE && objects) {
        if (retcode == 0) {
            // If revoking was successful, finish revoking the objects.
            nv_drm_finish_revoking_objects(dev, file_priv, objects,
                                           objects_count);
        }
        nv_drm_free(objects);
    }
#endif

    return retcode;
}

static const struct file_operations nv_drm_fops = {
    .owner = THIS_MODULE,

    .open = drm_open,
    .release = drm_release,
    .unlocked_ioctl = nv_drm_ioctl,
#if defined(CONFIG_COMPAT)
    .compat_ioctl = drm_compat_ioctl,
#endif

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    .mmap = nv_drm_mmap,
#endif

    .poll = drm_poll,
    .read = drm_read,

    .llseek = noop_llseek,
};

static const struct drm_ioctl_desc nv_drm_ioctls[] = {
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_NVKMS_MEMORY,
                      nv_drm_gem_import_nvkms_memory_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_USERSPACE_MEMORY,
                      nv_drm_gem_import_userspace_memory_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_MAP_OFFSET,
                      nv_drm_gem_map_offset_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GET_DEV_INFO,
                      nv_drm_get_dev_info_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GET_DRM_FILE_UNIQUE_ID,
                      nv_drm_get_drm_file_unique_id_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),

#if defined(NV_DRM_FENCE_AVAILABLE)
    DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_SUPPORTED,
                      nv_drm_fence_supported_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_PRIME_FENCE_CONTEXT_CREATE,
                      nv_drm_prime_fence_context_create_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_PRIME_FENCE_ATTACH,
                      nv_drm_gem_prime_fence_attach_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_CTX_CREATE,
                      nv_drm_semsurf_fence_ctx_create_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_CREATE,
                      nv_drm_semsurf_fence_create_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_WAIT,
                      nv_drm_semsurf_fence_wait_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_ATTACH,
                      nv_drm_semsurf_fence_attach_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
#endif

    /*
     * DRM_UNLOCKED is implicit for all non-legacy DRM driver IOCTLs since Linux
     * v4.10 commit fa5386459f06 "drm: Used DRM_LEGACY for all legacy functions"
     * (Linux v4.4 commit ea487835e887 "drm: Enforce unlocked ioctl operation
     * for kms driver ioctls" previously did it only for drivers that set the
     * DRM_MODESET flag), so this will race with SET_CLIENT_CAP. Linux v4.11
     * commit dcf727ab5d17 "drm: setclientcap doesn't need the drm BKL" also
     * removed locking from SET_CLIENT_CAP so there is no use attempting to lock
     * manually. The latter commit acknowledges that this can expose userspace
     * to inconsistent behavior when racing with itself, but accepts that risk.
     */
    DRM_IOCTL_DEF_DRV(NVIDIA_GET_CLIENT_CAPABILITY,
                      nv_drm_get_client_capability_ioctl,
                      0),

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32,
                      nv_drm_get_crtc_crc32_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32_V2,
                      nv_drm_get_crtc_crc32_v2_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_EXPORT_NVKMS_MEMORY,
                      nv_drm_gem_export_nvkms_memory_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_ALLOC_NVKMS_MEMORY,
                      nv_drm_gem_alloc_nvkms_memory_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_EXPORT_DMABUF_MEMORY,
                      nv_drm_gem_export_dmabuf_memory_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IDENTIFY_OBJECT,
                      nv_drm_gem_identify_object_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_DMABUF_SUPPORTED,
                      nv_drm_dmabuf_supported_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID,
                      nv_drm_get_dpy_id_for_connector_id_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID,
                      nv_drm_get_connector_id_for_dpy_id_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GRANT_PERMISSIONS,
                      nv_drm_grant_permission_ioctl,
                      DRM_UNLOCKED|DRM_MASTER),
    DRM_IOCTL_DEF_DRV(NVIDIA_REVOKE_PERMISSIONS,
                      nv_drm_revoke_permission_ioctl,
                      DRM_UNLOCKED|DRM_MASTER),
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
};

static struct drm_driver nv_drm_driver = {

    .driver_features =
#if defined(NV_DRM_DRIVER_PRIME_FLAG_PRESENT)
        DRIVER_PRIME |
#endif
#if defined(NV_DRM_SYNCOBJ_FEATURES_PRESENT)
        DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE |
#endif
        DRIVER_GEM | DRIVER_RENDER,

#if defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT)
    .gem_free_object = nv_drm_gem_free,
#endif

    .ioctls = nv_drm_ioctls,
    .num_ioctls = ARRAY_SIZE(nv_drm_ioctls),

    /*
     * Linux kernel v6.6 commit 71a7974ac701 ("drm/prime: Unexport helpers
     * for fd/handle conversion") unexports drm_gem_prime_handle_to_fd() and
     * drm_gem_prime_fd_to_handle().
     *
     * Prior to Linux kernel v6.6, commit 6b85aa68d9d5 ("drm: Enable PRIME
     * import/export for all drivers") made these helpers the default when
     * .prime_handle_to_fd / .prime_fd_to_handle are unspecified, so it's fine
     * to just skip specifying them if the helpers aren't present.
     */
#if NV_IS_EXPORT_SYMBOL_PRESENT_drm_gem_prime_handle_to_fd
    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
#endif
#if NV_IS_EXPORT_SYMBOL_PRESENT_drm_gem_prime_fd_to_handle
    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
#endif

    .gem_prime_import = nv_drm_gem_prime_import,
    .gem_prime_import_sg_table = nv_drm_gem_prime_import_sg_table,

#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
    .gem_prime_export = drm_gem_prime_export,
    .gem_prime_get_sg_table = nv_drm_gem_prime_get_sg_table,
    .gem_prime_vmap = nv_drm_gem_prime_vmap,
    .gem_prime_vunmap = nv_drm_gem_prime_vunmap,

    .gem_vm_ops = &nv_drm_gem_vma_ops,
#endif

#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
    .gem_prime_res_obj = nv_drm_gem_prime_res_obj,
#endif

#if defined(NV_DRM_DRIVER_HAS_SET_BUSID)
    .set_busid = nv_drm_pci_set_busid,
#endif

    .load = nv_drm_load,
    .unload = nv_drm_unload,
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    .postclose = nv_drm_postclose,
#endif
    .open = nv_drm_open,

    .fops = &nv_drm_fops,

#if defined(NV_DRM_BUS_PRESENT)
    .bus = &nv_drm_bus,
#endif

    .name = "nvidia-drm",

    .desc = "NVIDIA DRM driver",
    .date = "20160202",

#if defined(NV_DRM_DRIVER_HAS_DEVICE_LIST)
    .device_list = LIST_HEAD_INIT(nv_drm_driver.device_list),
#elif defined(NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST)
    .legacy_dev_list = LIST_HEAD_INIT(nv_drm_driver.legacy_dev_list),
#endif
};


/*
 * Update the global nv_drm_driver for the intended features.
 *
 * It defaults to PRIME-only, but is upgraded to atomic modeset if the
 * kernel supports atomic modeset and the 'modeset' kernel module
 * parameter is true.
 */
void nv_drm_update_drm_driver_features(void)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

    if (!nv_drm_modeset_module_param) {
        return;
    }

    nv_drm_driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;

    nv_drm_driver.master_set = nv_drm_master_set;
    nv_drm_driver.master_drop = nv_drm_master_drop;

    nv_drm_driver.dumb_create = nv_drm_dumb_create;
    nv_drm_driver.dumb_map_offset = nv_drm_dumb_map_offset;
#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
    nv_drm_driver.dumb_destroy = nv_drm_dumb_destroy;
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
}


/*
 * Helper function to allocate and register a DRM device for the given
 * NVIDIA GPU.
 */
void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
{
    struct nv_drm_device *nv_dev = NULL;
    struct drm_device *dev = NULL;
    struct device *device = gpu_info->os_device_ptr;
    bool bus_is_pci;

    DRM_DEBUG(
        "Registering device for NVIDIA GPU ID 0x%08x",
        gpu_info->gpu_id);

    /* Allocate NVIDIA-DRM device */

    nv_dev = nv_drm_calloc(1, sizeof(*nv_dev));

    if (nv_dev == NULL) {
        NV_DRM_LOG_ERR(
            "Failed to allocate memory for NVIDIA-DRM device object");
        return;
    }

    nv_dev->gpu_info = *gpu_info;

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    mutex_init(&nv_dev->lock);
#endif

    /* Allocate DRM device */

    dev = drm_dev_alloc(&nv_drm_driver, device);

    if (dev == NULL) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to allocate device");
        goto failed_drm_alloc;
    }

    dev->dev_private = nv_dev;
    nv_dev->dev = dev;

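    /*
     * Determine whether the underlying bus is PCI. Note that on FreeBSD
     * this only checks that a "pci" devclass is registered at all, not
     * that this particular device sits on the PCI bus.
     */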
    bus_is_pci =
#if defined(NV_LINUX)
        device->bus == &pci_bus_type;
#elif defined(NV_BSD)
        devclass_find("pci");
#endif

#if defined(NV_DRM_DEVICE_HAS_PDEV)
    if (bus_is_pci) {
        dev->pdev = to_pci_dev(device);
    }
#endif

    /* Register DRM device to DRM sub-system */

    if (drm_dev_register(dev, 0) != 0) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to register device");
        goto failed_drm_register;
    }

#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
    if (nv_drm_fbdev_module_param &&
        drm_core_check_feature(dev, DRIVER_MODESET)) {

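        /*
         * Kick out any firmware framebuffer driver (e.g. efifb or vesafb)
         * that may still be bound to this GPU's aperture before fbdev
         * emulation is enabled on it.
         */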
        if (bus_is_pci) {
            struct pci_dev *pdev = to_pci_dev(device);

#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_HAS_DRIVER_ARG)
            drm_aperture_remove_conflicting_pci_framebuffers(pdev, &nv_drm_driver);
#else
            drm_aperture_remove_conflicting_pci_framebuffers(pdev, nv_drm_driver.name);
#endif
        }
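
        /* 32 is the preferred bits-per-pixel for the emulated fbdev console. */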
        drm_fbdev_generic_setup(dev, 32);
    }
#endif /* defined(NV_DRM_FBDEV_GENERIC_AVAILABLE) */

    /* Add the NVIDIA-DRM device to the global device list */

    nv_dev->next = dev_list;
    dev_list = nv_dev;

    return; /* Success */

failed_drm_register:

    nv_drm_dev_free(dev);

failed_drm_alloc:

    nv_drm_free(nv_dev);
}

/*
 * Enumerate NVIDIA GPUs and allocate/register a DRM device for each of them.
 */
#if defined(NV_LINUX)
int nv_drm_probe_devices(void)
{
    nv_gpu_info_t *gpu_info = NULL;
    NvU32 gpu_count = 0;
    NvU32 i;

    int ret = 0;

    nv_drm_update_drm_driver_features();

    /* Enumerate NVIDIA GPUs */

    gpu_info = nv_drm_calloc(NV_MAX_GPUS, sizeof(*gpu_info));

    if (gpu_info == NULL) {
        ret = -ENOMEM;

        NV_DRM_LOG_ERR("Failed to allocate GPU info array");
        goto done;
    }

    gpu_count = nvKms->enumerateGpus(gpu_info);

    if (gpu_count == 0) {
        NV_DRM_LOG_INFO("No NVIDIA GPUs found");
        goto done;
    }

    WARN_ON(gpu_count > NV_MAX_GPUS);

    /* Register a DRM device for each NVIDIA GPU */

    for (i = 0; i < gpu_count; i++) {
        nv_drm_register_drm_device(&gpu_info[i]);
    }

done:

    nv_drm_free(gpu_info);

    return ret;
}
#endif

/*
 * Unregister all NVIDIA DRM devices.
 */
void nv_drm_remove_devices(void)
{
    while (dev_list != NULL) {
        struct nv_drm_device *next = dev_list->next;
        struct drm_device *dev = dev_list->dev;

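        /*
         * Unregister from the DRM core first so that userspace can no
         * longer reach the device, then release the DRM device and the
         * nvidia-drm bookkeeping structure.
         */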
        drm_dev_unregister(dev);
        nv_drm_dev_free(dev);

        nv_drm_free(dev_list);

        dev_list = next;
    }
}

/*
 * Handle system suspend and resume.
 *
 * Normally, a DRM driver would use drm_mode_config_helper_suspend() to save
 * the current state on suspend and drm_mode_config_helper_resume() to restore
 * it after resume. This works for upstream drivers because user-mode tasks
 * are frozen before the suspend hook is called.
 *
 * In the case of nvidia-drm, the suspend hook is also called when 'suspend'
 * is written to /proc/driver/nvidia/suspend, before user-mode tasks are
 * frozen. However, we don't actually need to save and restore the display
 * state, because the driver requires a VT switch to an unused VT before
 * suspending and a switch back to the application (or fbdev console) on
 * resume. The DRM client (or fbdev helper functions) will restore the
 * appropriate mode on resume.
 */
void nv_drm_suspend_resume(NvBool suspend)
{
    static DEFINE_MUTEX(nv_drm_suspend_mutex);
    static NvU32 nv_drm_suspend_count = 0;
    struct nv_drm_device *nv_dev;

    mutex_lock(&nv_drm_suspend_mutex);

    /*
     * Count the number of times the driver is asked to suspend. Suspend all
     * DRM devices on the first suspend call and resume them on the last
     * resume call. This is necessary because the kernel may call
     * nvkms_suspend() simultaneously for each GPU, but NVKMS itself also
     * suspends all GPUs on the first call.
     */
    if (suspend) {
        if (nv_drm_suspend_count++ > 0) {
            goto done;
        }
    } else {
        BUG_ON(nv_drm_suspend_count == 0);

        if (--nv_drm_suspend_count > 0) {
            goto done;
        }
    }

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    /*
     * NVKMS shuts down all heads on suspend. Update DRM state accordingly.
     */
    for (nv_dev = dev_list; nv_dev; nv_dev = nv_dev->next) {
        struct drm_device *dev = nv_dev->dev;

        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
            continue;
        }

        if (suspend) {
            drm_kms_helper_poll_disable(dev);
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
            drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
#endif
            drm_mode_config_reset(dev);
        } else {
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
            drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
#endif
            drm_kms_helper_poll_enable(dev);
        }
    }
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

done:
    mutex_unlock(&nv_drm_suspend_mutex);
}

#endif /* NV_DRM_AVAILABLE */
