/*
 * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE and NV_DRM_DRM_GEM_H_PRESENT */

#include "nvidia-drm-priv.h"
#include "nvidia-drm-drv.h"
#include "nvidia-drm-fb.h"
#include "nvidia-drm-modeset.h"
#include "nvidia-drm-encoder.h"
#include "nvidia-drm-connector.h"
#include "nvidia-drm-gem.h"
#include "nvidia-drm-crtc.h"
#include "nvidia-drm-prime-fence.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-drm-gem-dma-buf.h"

#if defined(NV_DRM_AVAILABLE)

#include "nvidia-drm-ioctl.h"

#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif

#if defined(NV_DRM_DRM_VBLANK_H_PRESENT)
#include <drm/drm_vblank.h>
#endif

#if defined(NV_DRM_DRM_FILE_H_PRESENT)
#include <drm/drm_file.h>
#endif

#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif

#if defined(NV_DRM_DRM_IOCTL_H_PRESENT)
#include <drm/drm_ioctl.h>
#endif

#include <linux/pci.h>

/*
 * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
 * moves a number of helper function definitions from
 * drm/drm_crtc_helper.h to a new drm_probe_helper.h.
 */
#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
#include <drm/drm_probe_helper.h>
#endif
#include <drm/drm_crtc_helper.h>

#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
#endif

#if defined(NV_DRM_DRM_AUTH_H_PRESENT)
#include <drm/drm_auth.h>
#endif

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include <drm/drm_atomic_helper.h>
#endif

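/* Singly-linked list of per-GPU NVIDIA-DRM devices, built at probe time. */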
static struct nv_drm_device *dev_list = NULL;

static const char* nv_get_input_colorspace_name(
    enum NvKmsInputColorSpace colorSpace)
{
    switch (colorSpace) {
        case NVKMS_INPUT_COLORSPACE_NONE:
            return "None";
        case NVKMS_INPUT_COLORSPACE_SCRGB_LINEAR:
            return "IEC 61966-2-2 linear FP";
        case NVKMS_INPUT_COLORSPACE_BT2100_PQ:
            return "ITU-R BT.2100-PQ YCbCr";
        default:
            /* We shouldn't hit this */
            WARN(1, "Unsupported input colorspace\n");
            return "None";
    }
}

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

static void nv_drm_output_poll_changed(struct drm_device *dev)
{
    struct drm_connector *connector = NULL;
    struct drm_mode_config *config = &dev->mode_config;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    struct drm_connector_list_iter conn_iter;
    nv_drm_connector_list_iter_begin(dev, &conn_iter);
#endif
    /*
     * Here drm_mode_config::mutex has been acquired unconditionally:
     *
     * - In the non-NV_DRM_CONNECTOR_LIST_ITER_PRESENT case, the mutex must
     *   be held for the duration of walking over the connectors.
     *
     * - In the NV_DRM_CONNECTOR_LIST_ITER_PRESENT case, the mutex must be
     *   held for the duration of a fill_modes() call chain:
     *     connector->funcs->fill_modes()
     *      |-> drm_helper_probe_single_connector_modes()
     *
     * It is easiest to always acquire the mutex for the entire connector
     * loop.
     */
    mutex_lock(&config->mutex);

    nv_drm_for_each_connector(connector, &conn_iter, dev) {

        struct nv_drm_connector *nv_connector = to_nv_connector(connector);

        if (!nv_drm_connector_check_connection_status_dirty_and_clear(
                nv_connector)) {
            continue;
        }

        connector->funcs->fill_modes(
            connector,
            dev->mode_config.max_width, dev->mode_config.max_height);
    }

    mutex_unlock(&config->mutex);
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
    nv_drm_connector_list_iter_end(&conn_iter);
#endif
}

static struct drm_framebuffer *nv_drm_framebuffer_create(
    struct drm_device *dev,
    struct drm_file *file,
    #if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG)
    const struct drm_mode_fb_cmd2 *cmd
    #else
    struct drm_mode_fb_cmd2 *cmd
    #endif
)
{
    struct drm_mode_fb_cmd2 local_cmd;
    struct drm_framebuffer *fb;

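    /*
     * Work on a local copy: nv_drm_internal_framebuffer_create() takes a
     * non-const pointer and may update the mode command. On kernels where
     * fb_create receives a non-const cmd, the updated command is copied
     * back to the caller below.
     */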
    local_cmd = *cmd;

    fb = nv_drm_internal_framebuffer_create(
            dev,
            file,
            &local_cmd);

    #if !defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG)
    *cmd = local_cmd;
    #endif

    return fb;
}

static const struct drm_mode_config_funcs nv_mode_config_funcs = {
    .fb_create = nv_drm_framebuffer_create,

    .atomic_state_alloc = nv_drm_atomic_state_alloc,
    .atomic_state_clear = nv_drm_atomic_state_clear,
    .atomic_state_free  = nv_drm_atomic_state_free,
    .atomic_check  = nv_drm_atomic_check,
    .atomic_commit = nv_drm_atomic_commit,

    .output_poll_changed = nv_drm_output_poll_changed,
};

static void nv_drm_event_callback(const struct NvKmsKapiEvent *event)
{
    struct nv_drm_device *nv_dev = event->privateData;

    mutex_lock(&nv_dev->lock);

    if (!atomic_read(&nv_dev->enable_event_handling)) {
        goto done;
    }

    switch (event->type) {
        case NVKMS_EVENT_TYPE_DPY_CHANGED:
            nv_drm_handle_display_change(
                nv_dev,
                event->u.displayChanged.display);
            break;

        case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED:
            nv_drm_handle_dynamic_display_connected(
                nv_dev,
                event->u.dynamicDisplayConnected.display);
            break;
        case NVKMS_EVENT_TYPE_FLIP_OCCURRED:
            nv_drm_handle_flip_occurred(
                nv_dev,
                event->u.flipOccurred.head,
                event->u.flipOccurred.layer);
            break;
        default:
            break;
    }

done:

    mutex_unlock(&nv_dev->lock);
}

/*
 * Helper function to initialize drm_device::mode_config from
 * NvKmsKapiDevice's resource information.
 */
static void
nv_drm_init_mode_config(struct nv_drm_device *nv_dev,
                        const struct NvKmsKapiDeviceResourcesInfo *pResInfo)
{
    struct drm_device *dev = nv_dev->dev;

    drm_mode_config_init(dev);
    drm_mode_create_dvi_i_properties(dev);

    dev->mode_config.funcs = &nv_mode_config_funcs;

    dev->mode_config.min_width  = pResInfo->caps.minWidthInPixels;
    dev->mode_config.min_height = pResInfo->caps.minHeightInPixels;

    dev->mode_config.max_width  = pResInfo->caps.maxWidthInPixels;
    dev->mode_config.max_height = pResInfo->caps.maxHeightInPixels;

    dev->mode_config.cursor_width  = pResInfo->caps.maxCursorSizeInPixels;
    dev->mode_config.cursor_height = pResInfo->caps.maxCursorSizeInPixels;

    /*
     * NVIDIA GPUs have no preferred depth. Arbitrarily report 24, to be
     * consistent with other DRM drivers.
     */

    dev->mode_config.preferred_depth = 24;
    dev->mode_config.prefer_shadow = 1;

#if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP) || \
    defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS)
    dev->mode_config.async_page_flip = true;
#else
    dev->mode_config.async_page_flip = false;
#endif

#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) && \
    defined(NV_DRM_MODE_CONFIG_HAS_ALLOW_FB_MODIFIERS)
    /* Allow clients to define framebuffer layouts using DRM format modifiers */
    dev->mode_config.allow_fb_modifiers = true;
#endif

    /* Initialize output polling support */

    drm_kms_helper_poll_init(dev);

    /* Disable output polling, because we don't support it yet */

    drm_kms_helper_poll_disable(dev);
}

/*
 * Helper function to enumerate encoders/connectors from NvKmsKapiDevice.
 */
static void nv_drm_enumerate_encoders_and_connectors
(
    struct nv_drm_device *nv_dev
)
{
    struct drm_device *dev = nv_dev->dev;
    NvU32 nDisplays = 0;

    if (!nvKms->getDisplays(nv_dev->pDevice, &nDisplays, NULL)) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to enumerate NvKmsKapiDisplay count");
    }

    if (nDisplays != 0) {
        NvKmsKapiDisplay *hDisplays =
            nv_drm_calloc(nDisplays, sizeof(*hDisplays));

        if (hDisplays != NULL) {
            if (!nvKms->getDisplays(nv_dev->pDevice, &nDisplays, hDisplays)) {
                NV_DRM_DEV_LOG_ERR(
                    nv_dev,
                    "Failed to enumerate NvKmsKapiDisplay handles");
            } else {
                NvU32 i;

                for (i = 0; i < nDisplays; i++) {
                    struct drm_encoder *encoder =
                        nv_drm_add_encoder(dev, hDisplays[i]);

                    if (IS_ERR(encoder)) {
                        NV_DRM_DEV_LOG_ERR(
                            nv_dev,
                            "Failed to add connector for NvKmsKapiDisplay 0x%08x",
                            hDisplays[i]);
                    }
                }
            }

            nv_drm_free(hDisplays);
        } else {
            NV_DRM_DEV_LOG_ERR(
                nv_dev,
                "Failed to allocate memory for NvKmsKapiDisplay array");
        }
    }
}

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

/*!
 * 'NV_DRM_OUT_FENCE_PTR' is an atomic per-plane property that clients can use
 * to request an out-fence fd for a particular plane that's being flipped.
 * 'NV_DRM_OUT_FENCE_PTR' does NOT have the same behavior as the standard
 * 'OUT_FENCE_PTR' property - the fd that's returned via 'NV_DRM_OUT_FENCE_PTR'
 * will only be signaled once the buffers in the corresponding flip are flipped
 * away from.
 * To use this property, a client sets it, through the atomic API, to a
 * pointer into its own address space. Once the driver receives the
 * post-syncpt fd in the flip reply, it copies that fd to the location the
 * pointer refers to.
 */
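/*
 * Illustrative sketch (not part of the driver) of how a client might use
 * this property via the libdrm atomic API. 'req', 'plane_id', and 'prop_id'
 * are assumed to be set up by the caller, with 'prop_id' found by matching
 * the name "NV_DRM_OUT_FENCE_PTR" among the plane's properties:
 *
 *   int out_fence_fd = -1;
 *   drmModeAtomicAddProperty(req, plane_id, prop_id,
 *                            (uint64_t)(uintptr_t)&out_fence_fd);
 *   drmModeAtomicCommit(fd, req, 0, NULL);
 *   // out_fence_fd now holds the post-syncpt fd from the flip reply.
 */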
static int nv_drm_create_properties(struct nv_drm_device *nv_dev)
{
    struct drm_prop_enum_list enum_list[3] = { };
    int i, len = 0;

    for (i = 0; i < 3; i++) {
        enum_list[len].type = i;
        enum_list[len].name = nv_get_input_colorspace_name(i);
        len++;
    }

#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
    if (!nv_dev->supportsSyncpts) {
        return 0;
    }

    nv_dev->nv_out_fence_property =
        drm_property_create_range(nv_dev->dev, DRM_MODE_PROP_ATOMIC,
            "NV_DRM_OUT_FENCE_PTR", 0, U64_MAX);
    if (nv_dev->nv_out_fence_property == NULL) {
        return -ENOMEM;
    }
#endif

    nv_dev->nv_input_colorspace_property =
        drm_property_create_enum(nv_dev->dev, 0, "NV_INPUT_COLORSPACE",
                                 enum_list, len);
    if (nv_dev->nv_input_colorspace_property == NULL) {
        NV_DRM_LOG_ERR("Failed to create NV_INPUT_COLORSPACE property");
        return -ENOMEM;
    }

#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    nv_dev->nv_hdr_output_metadata_property =
        drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB,
            "NV_HDR_STATIC_METADATA", 0);
    if (nv_dev->nv_hdr_output_metadata_property == NULL) {
        return -ENOMEM;
    }
#endif

    return 0;
}

static int nv_drm_load(struct drm_device *dev, unsigned long flags)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    struct NvKmsKapiDevice *pDevice;

    struct NvKmsKapiAllocateDeviceParams allocateDeviceParams;
    struct NvKmsKapiDeviceResourcesInfo resInfo;
#endif
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
    NvU64 kind;
    NvU64 gen;
    int i;
#endif
    int ret;

    struct nv_drm_device *nv_dev = to_nv_device(dev);

    NV_DRM_DEV_LOG_INFO(nv_dev, "Loading driver");

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return 0;
    }

    /* Allocate NvKmsKapiDevice from GPU ID */

    memset(&allocateDeviceParams, 0, sizeof(allocateDeviceParams));

    allocateDeviceParams.gpuId = nv_dev->gpu_info.gpu_id;

    allocateDeviceParams.privateData = nv_dev;
    allocateDeviceParams.eventCallback = nv_drm_event_callback;

    pDevice = nvKms->allocateDevice(&allocateDeviceParams);

    if (pDevice == NULL) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to allocate NvKmsKapiDevice");
        return -ENODEV;
    }

    /* Query information of resources available on device */

    if (!nvKms->getDeviceResourcesInfo(pDevice, &resInfo)) {

        nvKms->freeDevice(pDevice);

        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to query NvKmsKapiDevice resources info");
        return -ENODEV;
    }

    mutex_lock(&nv_dev->lock);

    /* Set NvKmsKapiDevice */

    nv_dev->pDevice = pDevice;

    nv_dev->pitchAlignment = resInfo.caps.pitchAlignment;

    nv_dev->hasVideoMemory = resInfo.caps.hasVideoMemory;

    nv_dev->genericPageKind = resInfo.caps.genericPageKind;

    /* Fermi through Volta use generation 0; Turing and later use generation 2. */
    nv_dev->pageKindGeneration = (nv_dev->genericPageKind == 0x06) ? 2 : 0;

    /* Desktop GPUs, and mobile GPUs Xavier and later, use the same sector layout. */
    nv_dev->sectorLayout = 1;

    nv_dev->supportsSyncpts = resInfo.caps.supportsSyncpts;

#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
    gen = nv_dev->pageKindGeneration;
    kind = nv_dev->genericPageKind;

    for (i = 0; i <= 5; i++) {
        nv_dev->modifiers[i] =
            /*    Log2(block height) ----------------------------------+  *
             *    Page Kind ------------------------------------+      |  *
             *    Gob Height/Page Kind Generation --------+     |      |  *
             *    Sector layout ---------------------+    |     |      |  *
             *    Compression --------------------+  |    |     |      |  *
             *                                    |  |    |     |      |  */
            DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, gen, kind, 5 - i);
    }

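    /* Close out the list: linear layout, then the DRM_FORMAT_MOD_INVALID terminator. */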
    nv_dev->modifiers[i++] = DRM_FORMAT_MOD_LINEAR;
    nv_dev->modifiers[i++] = DRM_FORMAT_MOD_INVALID;
#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */

    /* Initialize drm_device::mode_config */

    nv_drm_init_mode_config(nv_dev, &resInfo);

    ret = nv_drm_create_properties(nv_dev);
    if (ret < 0) {
        /* Drop nv_dev->lock, acquired above, before bailing out. */
        mutex_unlock(&nv_dev->lock);
        return -ENODEV;
    }

    if (!nvKms->declareEventInterest(
            nv_dev->pDevice,
            ((1 << NVKMS_EVENT_TYPE_DPY_CHANGED) |
             (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) |
             (1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED)))) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to register event mask");
    }

    /* Add crtcs */

    nv_drm_enumerate_crtcs_and_planes(nv_dev, &resInfo);

    /* Add connectors and encoders */

    nv_drm_enumerate_encoders_and_connectors(nv_dev);

#if !defined(NV_DRM_CRTC_STATE_HAS_NO_VBLANK)
    drm_vblank_init(dev, dev->mode_config.num_crtc);
#endif

    /*
     * Trigger hot-plug processing, to update connection status of
     * all HPD supported connectors.
     */

    drm_helper_hpd_irq_event(dev);

    /* Enable event handling */

    atomic_set(&nv_dev->enable_event_handling, true);

    init_waitqueue_head(&nv_dev->flip_event_wq);

    mutex_unlock(&nv_dev->lock);

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

    return 0;
}

static void __nv_drm_unload(struct drm_device *dev)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    struct NvKmsKapiDevice *pDevice = NULL;
#endif

    struct nv_drm_device *nv_dev = to_nv_device(dev);

    NV_DRM_DEV_LOG_INFO(nv_dev, "Unloading driver");

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        return;
    }

    mutex_lock(&nv_dev->lock);

    /* Disable event handling */

    atomic_set(&nv_dev->enable_event_handling, false);

    /* Clean up output polling */

    drm_kms_helper_poll_fini(dev);

    /* Clean up mode configuration */

    drm_mode_config_cleanup(dev);

    if (!nvKms->declareEventInterest(nv_dev->pDevice, 0x0)) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to stop event listening");
    }

    /* Unset NvKmsKapiDevice */

    pDevice = nv_dev->pDevice;
    nv_dev->pDevice = NULL;

    mutex_unlock(&nv_dev->lock);

    nvKms->freeDevice(pDevice);

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
}

#if defined(NV_DRM_DRIVER_UNLOAD_HAS_INT_RETURN_TYPE)
static int nv_drm_unload(struct drm_device *dev)
{
    __nv_drm_unload(dev);

    return 0;
}
#else
static void nv_drm_unload(struct drm_device *dev)
{
    __nv_drm_unload(dev);
}
#endif

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

static int __nv_drm_master_set(struct drm_device *dev,
                               struct drm_file *file_priv, bool from_open)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);

    if (!nvKms->grabOwnership(nv_dev->pDevice)) {
        return -EINVAL;
    }

    return 0;
}

#if defined(NV_DRM_DRIVER_SET_MASTER_HAS_INT_RETURN_TYPE)
static int nv_drm_master_set(struct drm_device *dev,
                             struct drm_file *file_priv, bool from_open)
{
    return __nv_drm_master_set(dev, file_priv, from_open);
}
#else
static void nv_drm_master_set(struct drm_device *dev,
                              struct drm_file *file_priv, bool from_open)
{
    if (__nv_drm_master_set(dev, file_priv, from_open) != 0) {
        NV_DRM_DEV_LOG_ERR(to_nv_device(dev), "Failed to grab modeset ownership");
    }
}
#endif


#if defined(NV_DRM_MASTER_DROP_HAS_FROM_RELEASE_ARG)
static
void nv_drm_master_drop(struct drm_device *dev,
                        struct drm_file *file_priv, bool from_release)
#else
static
void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
#endif
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    int err;

    /*
     * After dropping nvkms modeset ownership, it is not guaranteed that
     * drm and nvkms modeset state will remain in sync.  Therefore, disable
     * all outputs and crtcs before dropping nvkms modeset ownership.
     *
     * First disable all active outputs atomically, then disable each crtc
     * one by one; there is no helper function available to disable all
     * crtcs atomically.
     */

    drm_modeset_lock_all(dev);

    if ((err = nv_drm_atomic_helper_disable_all(
            dev,
            dev->mode_config.acquire_ctx)) != 0) {

        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "nv_drm_atomic_helper_disable_all failed with error code %d !",
            err);
    }

    drm_modeset_unlock_all(dev);

    nvKms->releaseOwnership(nv_dev->pDevice);
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

#if defined(NV_DRM_BUS_PRESENT) || defined(NV_DRM_DRIVER_HAS_SET_BUSID)
static int nv_drm_pci_set_busid(struct drm_device *dev,
                                struct drm_master *master)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);

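    /* Produces a bus ID string of the form "pci:0000:01:00.0". */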
    master->unique = nv_drm_asprintf("pci:%04x:%02x:%02x.%d",
                                          nv_dev->gpu_info.pci_info.domain,
                                          nv_dev->gpu_info.pci_info.bus,
                                          nv_dev->gpu_info.pci_info.slot,
                                          nv_dev->gpu_info.pci_info.function);

    if (master->unique == NULL) {
        return -ENOMEM;
    }

    master->unique_len = strlen(master->unique);

    return 0;
}
#endif

static int nv_drm_get_dev_info_ioctl(struct drm_device *dev,
                                     void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_get_dev_info_params *params = data;

    if (dev->primary == NULL) {
        return -ENOENT;
    }

    params->gpu_id = nv_dev->gpu_info.gpu_id;
    params->primary_index = dev->primary->index;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    params->generic_page_kind = nv_dev->genericPageKind;
    params->page_kind_generation = nv_dev->pageKindGeneration;
    params->sector_layout = nv_dev->sectorLayout;
#else
    params->generic_page_kind = 0;
    params->page_kind_generation = 0;
    params->sector_layout = 0;
#endif

    return 0;
}

static
int nv_drm_get_client_capability_ioctl(struct drm_device *dev,
                                       void *data, struct drm_file *filep)
{
    struct drm_nvidia_get_client_capability_params *params = data;

    switch (params->capability) {
#if defined(DRM_CLIENT_CAP_STEREO_3D)
        case DRM_CLIENT_CAP_STEREO_3D:
            params->value = filep->stereo_allowed;
            break;
#endif
#if defined(DRM_CLIENT_CAP_UNIVERSAL_PLANES)
        case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
            params->value = filep->universal_planes;
            break;
#endif
#if defined(DRM_CLIENT_CAP_ATOMIC)
        case DRM_CLIENT_CAP_ATOMIC:
            params->value = filep->atomic;
            break;
#endif
        default:
            return -EINVAL;
    }

    return 0;
}
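
/*
 * Illustrative sketch (not part of the driver) of a client invoking the
 * ioctl above, assuming the DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY request
 * number from the uapi header nvidia-drm-ioctl.h:
 *
 *   struct drm_nvidia_get_client_capability_params params = {
 *       .capability = DRM_CLIENT_CAP_ATOMIC,
 *   };
 *   if (drmIoctl(fd, DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY, &params) == 0)
 *       printf("atomic cap: %llu\n", (unsigned long long)params.value);
 */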

#if defined(NV_DRM_BUS_PRESENT)

#if defined(NV_DRM_BUS_HAS_GET_IRQ)
static int nv_drm_bus_get_irq(struct drm_device *dev)
{
    return 0;
}
#endif

#if defined(NV_DRM_BUS_HAS_GET_NAME)
static const char *nv_drm_bus_get_name(struct drm_device *dev)
{
    return "nvidia-drm";
}
#endif

static struct drm_bus nv_drm_bus = {
#if defined(NV_DRM_BUS_HAS_BUS_TYPE)
    .bus_type     = DRIVER_BUS_PCI,
#endif
#if defined(NV_DRM_BUS_HAS_GET_IRQ)
    .get_irq      = nv_drm_bus_get_irq,
#endif
#if defined(NV_DRM_BUS_HAS_GET_NAME)
    .get_name     = nv_drm_bus_get_name,
#endif
    .set_busid    = nv_drm_pci_set_busid,
};

#endif /* NV_DRM_BUS_PRESENT */

static const struct file_operations nv_drm_fops = {
    .owner          = THIS_MODULE,

    .open           = drm_open,
    .release        = drm_release,
    .unlocked_ioctl = drm_ioctl,
#if defined(CONFIG_COMPAT)
    .compat_ioctl   = drm_compat_ioctl,
#endif

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    .mmap           = nv_drm_mmap,
#endif

    .poll           = drm_poll,
    .read           = drm_read,

    .llseek         = noop_llseek,
};

static const struct drm_ioctl_desc nv_drm_ioctls[] = {
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_NVKMS_MEMORY,
                      nv_drm_gem_import_nvkms_memory_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_USERSPACE_MEMORY,
                      nv_drm_gem_import_userspace_memory_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_MAP_OFFSET,
                      nv_drm_gem_map_offset_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GET_DEV_INFO,
                      nv_drm_get_dev_info_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),

#if defined(NV_DRM_FENCE_AVAILABLE)
    DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_SUPPORTED,
                      nv_drm_fence_supported_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_CONTEXT_CREATE,
                      nv_drm_fence_context_create_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_FENCE_ATTACH,
                      nv_drm_gem_fence_attach_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
#endif

    DRM_IOCTL_DEF_DRV(NVIDIA_GET_CLIENT_CAPABILITY,
                      nv_drm_get_client_capability_ioctl,
                      0),
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32,
                      nv_drm_get_crtc_crc32_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32_V2,
                      nv_drm_get_crtc_crc32_v2_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_EXPORT_NVKMS_MEMORY,
                      nv_drm_gem_export_nvkms_memory_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_ALLOC_NVKMS_MEMORY,
                      nv_drm_gem_alloc_nvkms_memory_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_EXPORT_DMABUF_MEMORY,
                      nv_drm_gem_export_dmabuf_memory_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IDENTIFY_OBJECT,
                      nv_drm_gem_identify_object_ioctl,
                      DRM_RENDER_ALLOW|DRM_UNLOCKED),
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
};

static struct drm_driver nv_drm_driver = {

    .driver_features        =
#if defined(NV_DRM_DRIVER_PRIME_FLAG_PRESENT)
                               DRIVER_PRIME |
#endif
                               DRIVER_GEM  | DRIVER_RENDER,

#if defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT)
    .gem_free_object        = nv_drm_gem_free,
#endif

    .ioctls                 = nv_drm_ioctls,
    .num_ioctls             = ARRAY_SIZE(nv_drm_ioctls),

    .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
    .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
    .gem_prime_import       = nv_drm_gem_prime_import,
    .gem_prime_import_sg_table = nv_drm_gem_prime_import_sg_table,

#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
    .gem_prime_export       = drm_gem_prime_export,
    .gem_prime_get_sg_table = nv_drm_gem_prime_get_sg_table,
    .gem_prime_vmap         = nv_drm_gem_prime_vmap,
    .gem_prime_vunmap       = nv_drm_gem_prime_vunmap,

    .gem_vm_ops             = &nv_drm_gem_vma_ops,
#endif

#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
    .gem_prime_res_obj      = nv_drm_gem_prime_res_obj,
#endif

#if defined(NV_DRM_DRIVER_HAS_SET_BUSID)
    .set_busid              = nv_drm_pci_set_busid,
#endif

    .load                   = nv_drm_load,
    .unload                 = nv_drm_unload,

    .fops                   = &nv_drm_fops,

#if defined(NV_DRM_BUS_PRESENT)
    .bus                    = &nv_drm_bus,
#endif

    .name                   = "nvidia-drm",

    .desc                   = "NVIDIA DRM driver",
    .date                   = "20160202",

#if defined(NV_DRM_DRIVER_HAS_DEVICE_LIST)
    .device_list            = LIST_HEAD_INIT(nv_drm_driver.device_list),
#elif defined(NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST)
    .legacy_dev_list        = LIST_HEAD_INIT(nv_drm_driver.legacy_dev_list),
#endif
};


/*
 * Update the global nv_drm_driver for the intended features.
 *
 * It defaults to PRIME-only, but is upgraded to atomic modeset if the
 * kernel supports atomic modeset and the 'modeset' kernel module
 * parameter is true.
 */
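/* Set at module load time, e.g. "modprobe nvidia-drm modeset=1". */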
static void nv_drm_update_drm_driver_features(void)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

    if (!nv_drm_modeset_module_param) {
        return;
    }

    nv_drm_driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;

    nv_drm_driver.master_set       = nv_drm_master_set;
    nv_drm_driver.master_drop      = nv_drm_master_drop;

    nv_drm_driver.dumb_create      = nv_drm_dumb_create;
    nv_drm_driver.dumb_map_offset  = nv_drm_dumb_map_offset;
    nv_drm_driver.dumb_destroy     = nv_drm_dumb_destroy;
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
}


/*
 * Helper function to allocate and register a DRM device for the given
 * NVIDIA GPU ID.
 */
static void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
{
    struct nv_drm_device *nv_dev = NULL;
    struct drm_device *dev = NULL;
    struct device *device = gpu_info->os_device_ptr;

    DRM_DEBUG(
        "Registering device for NVIDIA GPU ID 0x%08x",
        gpu_info->gpu_id);

    /* Allocate NVIDIA-DRM device */

    nv_dev = nv_drm_calloc(1, sizeof(*nv_dev));

    if (nv_dev == NULL) {
        NV_DRM_LOG_ERR(
            "Failed to allocate memory for NVIDIA-DRM device object");
        return;
    }

    nv_dev->gpu_info = *gpu_info;

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    mutex_init(&nv_dev->lock);
#endif

    /* Allocate DRM device */

    dev = drm_dev_alloc(&nv_drm_driver, device);

    if (dev == NULL) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to allocate device");
        goto failed_drm_alloc;
    }

    dev->dev_private = nv_dev;
    nv_dev->dev = dev;

#if defined(NV_DRM_DEVICE_HAS_PDEV)
    if (device->bus == &pci_bus_type) {
        dev->pdev = to_pci_dev(device);
    }
#endif

    /* Register DRM device to DRM sub-system */

    if (drm_dev_register(dev, 0) != 0) {
        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to register device");
        goto failed_drm_register;
    }

    /* Add NVIDIA-DRM device into list */

    nv_dev->next = dev_list;
    dev_list = nv_dev;

    return; /* Success */

failed_drm_register:

    nv_drm_dev_free(dev);

failed_drm_alloc:

    nv_drm_free(nv_dev);
}

/*
 * Enumerate NVIDIA GPUs and allocate/register DRM device for each of them.
 */
int nv_drm_probe_devices(void)
{
    nv_gpu_info_t *gpu_info = NULL;
    NvU32 gpu_count = 0;
    NvU32 i;

    int ret = 0;

    nv_drm_update_drm_driver_features();

    /* Enumerate NVIDIA GPUs */

    gpu_info = nv_drm_calloc(NV_MAX_GPUS, sizeof(*gpu_info));

    if (gpu_info == NULL) {
        ret = -ENOMEM;

        NV_DRM_LOG_ERR("Failed to allocate GPU info array");
        goto done;
    }

    gpu_count = nvKms->enumerateGpus(gpu_info);

    if (gpu_count == 0) {
        NV_DRM_LOG_INFO("No NVIDIA GPUs found");
        goto done;
    }

    WARN_ON(gpu_count > NV_MAX_GPUS);

    /* Register DRM device for each NVIDIA GPU */

    for (i = 0; i < gpu_count; i++) {
        nv_drm_register_drm_device(&gpu_info[i]);
    }

done:

    nv_drm_free(gpu_info);

    return ret;
}

/*
 * Unregister all NVIDIA DRM devices.
 */
void nv_drm_remove_devices(void)
{
    while (dev_list != NULL) {
        struct nv_drm_device *next = dev_list->next;

        drm_dev_unregister(dev_list->dev);
        nv_drm_dev_free(dev_list->dev);

        nv_drm_free(dev_list);

        dev_list = next;
    }
}

#endif /* NV_DRM_AVAILABLE */