/*
 * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <asm/div64.h> /* do_div() */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/random.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/freezer.h>
#include <linux/poll.h>
#include <linux/cdev.h>

#include <acpi/video.h>

#include "nvstatus.h"

#include "nv-modeset-interface.h"
#include "nv-kref.h"

#include "nvidia-modeset-os-interface.h"
#include "nvkms.h"
#include "nvkms-ioctl.h"

#include "conftest.h"
#include "nv-procfs.h"
#include "nv-kthread-q.h"
#include "nv-time.h"
#include "nv-lock.h"
#include "nv-chardev-numbers.h"

/*
 * Commit aefb2f2e619b ("x86/bugs: Rename CONFIG_RETPOLINE =>
 * CONFIG_MITIGATION_RETPOLINE") in v6.8 renamed CONFIG_RETPOLINE.
 */
#if !defined(CONFIG_RETPOLINE) && !defined(CONFIG_MITIGATION_RETPOLINE)
#include "nv-retpoline.h"
#endif

#include <linux/backlight.h>

#define NVKMS_LOG_PREFIX "nvidia-modeset: "

static bool output_rounding_fix = true;
module_param_named(output_rounding_fix, output_rounding_fix, bool, 0400);

static bool disable_hdmi_frl = false;
module_param_named(disable_hdmi_frl, disable_hdmi_frl, bool, 0400);

static bool disable_vrr_memclk_switch = false;
module_param_named(disable_vrr_memclk_switch, disable_vrr_memclk_switch, bool, 0400);

static bool hdmi_deepcolor = false;
module_param_named(hdmi_deepcolor, hdmi_deepcolor, bool, 0400);

static bool vblank_sem_control = false;
module_param_named(vblank_sem_control, vblank_sem_control, bool, 0400);

static bool opportunistic_display_sync = true;
module_param_named(opportunistic_display_sync, opportunistic_display_sync, bool, 0400);

/* These parameters are used for fault injection tests.  Normally the defaults
 * should be used. */
MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc");
static int fail_malloc_num = -1;
module_param_named(fail_malloc, fail_malloc_num, int, 0400);

MODULE_PARM_DESC(malloc_verbose, "Report information about malloc calls on module unload");
static bool malloc_verbose = false;
module_param_named(malloc_verbose, malloc_verbose, bool, 0400);

#if NVKMS_CONFIG_FILE_SUPPORTED
/* This parameter is used to find the dpy override conf file */
#define NVKMS_CONF_FILE_SPECIFIED (nvkms_conf != NULL)

MODULE_PARM_DESC(config_file,
                 "Path to the nvidia-modeset configuration file "
                 "(default: disabled)");
static char *nvkms_conf = NULL;
module_param_named(config_file, nvkms_conf, charp, 0400);
#endif
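
/*
 * All of the above parameters use permission mode 0400, so they are read
 * once at module load time.  An illustrative invocation (command shown
 * only as an example):
 *
 *   modprobe nvidia-modeset output_rounding_fix=0 malloc_verbose=1
 *
 * or persistently via a modprobe.d entry:
 *
 *   options nvidia-modeset output_rounding_fix=0
 */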

static atomic_t nvkms_alloc_called_count;

NvBool nvkms_output_rounding_fix(void)
{
    return output_rounding_fix;
}

NvBool nvkms_disable_hdmi_frl(void)
{
    return disable_hdmi_frl;
}

NvBool nvkms_disable_vrr_memclk_switch(void)
{
    return disable_vrr_memclk_switch;
}

NvBool nvkms_hdmi_deepcolor(void)
{
    return hdmi_deepcolor;
}

NvBool nvkms_vblank_sem_control(void)
{
    return vblank_sem_control;
}

NvBool nvkms_opportunistic_display_sync(void)
{
    return opportunistic_display_sync;
}

#define NVKMS_SYNCPT_STUBS_NEEDED

/*************************************************************************
 * NVKMS interface for nvhost unit for sync point APIs.
 *************************************************************************/

#ifdef NVKMS_SYNCPT_STUBS_NEEDED
/* Unsupported STUB for nvkms_syncpt APIs */
NvBool nvkms_syncpt_op(
    enum NvKmsSyncPtOp op,
    NvKmsSyncPtOpParams *params)
{
    return NV_FALSE;
}
#endif

#define NVKMS_MAJOR_DEVICE_NUMBER 195
#define NVKMS_MINOR_DEVICE_NUMBER 254

/*
 * Convert from microseconds to jiffies.  The conversion is:
 * ((usec) * HZ / 1000000)
 *
 * Use do_div() to avoid gcc-generated references to __udivdi3().
 * Note that the do_div() macro divides the first argument in place.
 */
static inline unsigned long NVKMS_USECS_TO_JIFFIES(NvU64 usec)
{
    unsigned long result = usec * HZ;
    do_div(result, 1000000);
    return result;
}
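
/*
 * Worked example (assuming HZ == 250, i.e. a 4 ms scheduler tick): a
 * 16667 usec delay becomes 16667 * 250 / 1000000 = 4 jiffies after the
 * integer division, i.e. roughly one 60 Hz frame rounded down to whole
 * ticks.
 */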


/*************************************************************************
 * NVKMS uses a global lock, nvkms_lock.  The lock is taken in the
 * file operation callback functions when calling into core NVKMS.
 *************************************************************************/

static struct semaphore nvkms_lock;

/*************************************************************************
 * User clients of NVKMS may need to be synchronized with suspend/resume
 * operations.  This depends on the state of the system when the NVKMS
 * suspend/resume callbacks are invoked.  NVKMS uses a single
 * RW lock, nvkms_pm_lock, for this synchronization.
 *************************************************************************/

static struct rw_semaphore nvkms_pm_lock;

/*************************************************************************
 * NVKMS executes almost all of its queued work items on a single
 * kthread.  The exceptions are deferred close() handlers, which typically
 * block for long periods of time and would stall their queue.
 *************************************************************************/

static struct nv_kthread_q nvkms_kthread_q;
static struct nv_kthread_q nvkms_deferred_close_kthread_q;

/*************************************************************************
 * The nvkms_per_open structure tracks data that is specific to a
 * single open.
 *************************************************************************/

struct nvkms_per_open {
    void *data;

    enum NvKmsClientType type;

    union {
        struct {
            struct {
                atomic_t available;
                wait_queue_head_t wait_queue;
            } events;
        } user;

        struct {
            struct {
                nv_kthread_q_item_t nv_kthread_q_item;
            } events;
        } kernel;
    } u;

    nv_kthread_q_item_t deferred_close_q_item;
};

/*************************************************************************
 * nvkms_pm_lock helper functions.  Since no down_read_interruptible()
 * or equivalent interface is available, it needs to be approximated with
 * down_read_trylock() to enable the kernel's freezer to round up user
 * threads going into suspend.
 *************************************************************************/

static inline int nvkms_read_trylock_pm_lock(void)
{
    return !down_read_trylock(&nvkms_pm_lock);
}
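
/*
 * Note: like down_read_interruptible(), nvkms_read_trylock_pm_lock()
 * returns 0 on success (lock acquired) and non-zero on failure; hence the
 * negation of down_read_trylock()'s 1-on-success return value above.
 */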

static inline void nvkms_read_lock_pm_lock(void)
{
    if ((current->flags & PF_NOFREEZE)) {
        /*
         * Non-freezable tasks (i.e. kthreads in this case) don't have to worry
         * about being frozen during system suspend, but do need to block so
         * that the CPU can go idle during s2idle. Do a normal uninterruptible
         * blocking wait for the PM lock.
         */
        down_read(&nvkms_pm_lock);
    } else {
        /*
         * For freezable tasks, make sure we give the kernel an opportunity to
         * freeze if taking the PM lock fails.
         */
        while (!down_read_trylock(&nvkms_pm_lock)) {
            try_to_freeze();
            cond_resched();
        }
    }
}

static inline void nvkms_read_unlock_pm_lock(void)
{
    up_read(&nvkms_pm_lock);
}

static inline void nvkms_write_lock_pm_lock(void)
{
    down_write(&nvkms_pm_lock);
}

static inline void nvkms_write_unlock_pm_lock(void)
{
    up_write(&nvkms_pm_lock);
}

/*************************************************************************
 * nvidia-modeset-os-interface.h functions.  It is assumed that these
 * are called while nvkms_lock is held.
 *************************************************************************/

/* Don't use kmalloc for allocations larger than one page */
#define KMALLOC_LIMIT PAGE_SIZE

void* nvkms_alloc(size_t size, NvBool zero)
{
    void *p;

    if (malloc_verbose || fail_malloc_num >= 0) {
        int this_alloc = atomic_inc_return(&nvkms_alloc_called_count) - 1;
        if (fail_malloc_num >= 0 && fail_malloc_num == this_alloc) {
            printk(KERN_WARNING NVKMS_LOG_PREFIX "Failing alloc %d\n",
                   fail_malloc_num);
            return NULL;
        }
    }

    if (size <= KMALLOC_LIMIT) {
        p = kmalloc(size, GFP_KERNEL);
    } else {
        p = vmalloc(size);
    }

    if (zero && (p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}

void nvkms_free(void *ptr, size_t size)
{
    if (size <= KMALLOC_LIMIT) {
        kfree(ptr);
    } else {
        vfree(ptr);
    }
}
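
/*
 * Because nvkms_free() uses the size to decide between kfree() and vfree(),
 * callers must pass the same size they allocated with.  A minimal sketch
 * (hypothetical caller, for illustration only):
 *
 *   struct foo *f = nvkms_alloc(sizeof(*f), NV_TRUE);
 *   if (f != NULL) {
 *       ...
 *       nvkms_free(f, sizeof(*f));  // same size selects the matching path
 *   }
 */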

void* nvkms_memset(void *ptr, NvU8 c, size_t size)
{
    return memset(ptr, c, size);
}

void* nvkms_memcpy(void *dest, const void *src, size_t n)
{
    return memcpy(dest, src, n);
}

void* nvkms_memmove(void *dest, const void *src, size_t n)
{
    return memmove(dest, src, n);
}

int nvkms_memcmp(const void *s1, const void *s2, size_t n)
{
    return memcmp(s1, s2, n);
}

size_t nvkms_strlen(const char *s)
{
    return strlen(s);
}

int nvkms_strcmp(const char *s1, const char *s2)
{
    return strcmp(s1, s2);
}

char* nvkms_strncpy(char *dest, const char *src, size_t n)
{
    return strncpy(dest, src, n);
}

void nvkms_usleep(NvU64 usec)
{
    if (usec < 1000) {
        /*
         * If the period to wait is less than one millisecond, sleep
         * using udelay(); note this is a busy wait.
         */
        udelay(usec);
    } else {
        /*
         * Otherwise, sleep with millisecond precision.  Clamp the
         * time to ~4 seconds (0xFFF/1000 => 4.09 seconds).
         *
         * Note that the do_div() macro divides the first argument in
         * place.
         */

        int msec;
        NvU64 tmp = usec + 500;
        do_div(tmp, 1000);
        msec = (int) (tmp & 0xFFF);

        /*
         * XXX NVKMS TODO: this may need to be msleep_interruptible(),
         * though the callers would need to be made to handle
         * returning early.
         */
        msleep(msec);
    }
}

NvU64 nvkms_get_usec(void)
{
    struct timespec64 ts;
    NvU64 ns;

    ktime_get_raw_ts64(&ts);

    ns = timespec64_to_ns(&ts);
    return ns / 1000;
}

int nvkms_copyin(void *kptr, NvU64 uaddr, size_t n)
{
    if (!nvKmsNvU64AddressIsSafe(uaddr)) {
        return -EINVAL;
    }

    if (copy_from_user(kptr, nvKmsNvU64ToPointer(uaddr), n) != 0) {
        return -EFAULT;
    }

    return 0;
}

int nvkms_copyout(NvU64 uaddr, const void *kptr, size_t n)
{
    if (!nvKmsNvU64AddressIsSafe(uaddr)) {
        return -EINVAL;
    }

    if (copy_to_user(nvKmsNvU64ToPointer(uaddr), kptr, n) != 0) {
        return -EFAULT;
    }

    return 0;
}

void nvkms_yield(void)
{
    schedule();
}

void nvkms_dump_stack(void)
{
    dump_stack();
}

int nvkms_snprintf(char *str, size_t size, const char *format, ...)
{
    int ret;
    va_list ap;

    va_start(ap, format);
    ret = vsnprintf(str, size, format, ap);
    va_end(ap);

    return ret;
}

int nvkms_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
    return vsnprintf(str, size, format, ap);
}

void nvkms_log(const int level, const char *gpuPrefix, const char *msg)
{
    const char *levelString;
    const char *levelPrefix;

    switch (level) {
    default:
    case NVKMS_LOG_LEVEL_INFO:
        levelPrefix = "";
        levelString = KERN_INFO;
        break;
    case NVKMS_LOG_LEVEL_WARN:
        levelPrefix = "WARNING: ";
        levelString = KERN_WARNING;
        break;
    case NVKMS_LOG_LEVEL_ERROR:
        levelPrefix = "ERROR: ";
        levelString = KERN_ERR;
        break;
    }

    printk("%s%s%s%s%s\n",
           levelString, NVKMS_LOG_PREFIX, levelPrefix, gpuPrefix, msg);
}

void
nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel,
                          NvBool eventsAvailable)
{
    struct nvkms_per_open *popen = pOpenKernel;

    switch (popen->type) {
        case NVKMS_CLIENT_USER_SPACE:
            /*
             * Write popen->events.available atomically, to avoid any races or
             * memory barrier issues interacting with nvkms_poll().
             */
            atomic_set(&popen->u.user.events.available, eventsAvailable);

            wake_up_interruptible(&popen->u.user.events.wait_queue);

            break;
        case NVKMS_CLIENT_KERNEL_SPACE:
            if (eventsAvailable) {
                nv_kthread_q_schedule_q_item(
                    &nvkms_kthread_q,
                    &popen->u.kernel.events.nv_kthread_q_item);
            }

            break;
    }
}

static void nvkms_suspend(NvU32 gpuId)
{
    nvKmsKapiSuspendResume(NV_TRUE /* suspend */);

    if (gpuId == 0) {
        nvkms_write_lock_pm_lock();
    }

    down(&nvkms_lock);
    nvKmsSuspend(gpuId);
    up(&nvkms_lock);
}

static void nvkms_resume(NvU32 gpuId)
{
    down(&nvkms_lock);
    nvKmsResume(gpuId);
    up(&nvkms_lock);

    if (gpuId == 0) {
        nvkms_write_unlock_pm_lock();
    }

    nvKmsKapiSuspendResume(NV_FALSE /* suspend */);
}


/*************************************************************************
 * Interface with resman.
 *************************************************************************/

static nvidia_modeset_rm_ops_t __rm_ops = { 0 };
static nvidia_modeset_callbacks_t nvkms_rm_callbacks = {
    .suspend = nvkms_suspend,
    .resume  = nvkms_resume
};

static int nvkms_alloc_rm(void)
{
    NV_STATUS nvstatus;
    int ret;

    __rm_ops.version_string = NV_VERSION_STRING;

    nvstatus = nvidia_get_rm_ops(&__rm_ops);

    if (nvstatus != NV_OK) {
        printk(KERN_ERR NVKMS_LOG_PREFIX "Version mismatch: "
               "nvidia.ko(%s) nvidia-modeset.ko(%s)\n",
               __rm_ops.version_string, NV_VERSION_STRING);
        return -EINVAL;
    }

    ret = __rm_ops.set_callbacks(&nvkms_rm_callbacks);
    if (ret < 0) {
        printk(KERN_ERR NVKMS_LOG_PREFIX "Failed to register callbacks\n");
        return ret;
    }

    return 0;
}

static void nvkms_free_rm(void)
{
    __rm_ops.set_callbacks(NULL);
}

void nvkms_call_rm(void *ops)
{
    nvidia_modeset_stack_ptr stack = NULL;

    if (__rm_ops.alloc_stack(&stack) != 0) {
        return;
    }

    __rm_ops.op(stack, ops);

    __rm_ops.free_stack(stack);
}

/*************************************************************************
 * ref_ptr implementation.
 *************************************************************************/

struct nvkms_ref_ptr {
    nv_kref_t refcnt;
    // Access to ptr is guarded by the nvkms_lock.
    void *ptr;
};

struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr)
{
    struct nvkms_ref_ptr *ref_ptr = nvkms_alloc(sizeof(*ref_ptr), NV_FALSE);
    if (ref_ptr) {
        // The ref_ptr owner counts as a reference on the ref_ptr itself.
        nv_kref_init(&ref_ptr->refcnt);
        ref_ptr->ptr = ptr;
    }
    return ref_ptr;
}

void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr)
{
    if (ref_ptr) {
        ref_ptr->ptr = NULL;
        // Release the owner's reference of the ref_ptr.
        nvkms_dec_ref(ref_ptr);
    }
}

void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr)
{
    nv_kref_get(&ref_ptr->refcnt);
}

static void ref_ptr_free(nv_kref_t *ref)
{
    struct nvkms_ref_ptr *ref_ptr = container_of(ref, struct nvkms_ref_ptr,
                                                 refcnt);
    nvkms_free(ref_ptr, sizeof(*ref_ptr));
}

void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr)
{
    void *ptr = ref_ptr->ptr;
    nv_kref_put(&ref_ptr->refcnt, ref_ptr_free);
    return ptr;
}
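
/*
 * Typical ref_ptr lifecycle, sketched for illustration (the object name
 * "obj" is hypothetical):
 *
 *   obj->ref_ptr = nvkms_alloc_ref_ptr(obj);  // owner, under nvkms_lock
 *
 *   nvkms_inc_ref(obj->ref_ptr);              // e.g. in a tasklet, so the
 *                                             // ref_ptr outlives the timer
 *
 *   ptr = nvkms_dec_ref(obj->ref_ptr);        // consumer, under nvkms_lock;
 *                                             // NULL means the owner already
 *                                             // called nvkms_free_ref_ptr()
 */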

/*************************************************************************
 * Timer support
 *
 * Core NVKMS needs to be able to schedule work to execute in the
 * future, within process context.
 *
 * To achieve this, use struct timer_list to schedule a timer
 * callback, nvkms_timer_callback().  This will execute in softirq
 * context, so from there schedule an nv_kthread_q item,
 * nvkms_kthread_q_callback(), which will execute in process context.
 *************************************************************************/

struct nvkms_timer_t {
    nv_kthread_q_item_t nv_kthread_q_item;
    struct timer_list kernel_timer;
    NvBool cancel;
    NvBool complete;
    NvBool isRefPtr;
    NvBool kernel_timer_created;
    nvkms_timer_proc_t *proc;
    void *dataPtr;
    NvU32 dataU32;
    struct list_head timers_list;
};

/*
 * Global list of pending timers; any change requires holding the lock.
 */
static struct {
    spinlock_t lock;
    struct list_head list;
} nvkms_timers;

static void nvkms_kthread_q_callback(void *arg)
{
    struct nvkms_timer_t *timer = arg;
    void *dataPtr;
    unsigned long flags = 0;

    /*
     * We can delete this timer from the pending timers list - it's being
     * processed now.
     */
    spin_lock_irqsave(&nvkms_timers.lock, flags);
    list_del(&timer->timers_list);
    spin_unlock_irqrestore(&nvkms_timers.lock, flags);

    /*
     * After nvkms_kthread_q_callback() we want to be sure that the timer
     * callback for this timer has also finished.  This is important during
     * module unload - it lets us safely unload the module by first deleting
     * pending timers and then waiting for workqueue callbacks.
     */
    if (timer->kernel_timer_created) {
        del_timer_sync(&timer->kernel_timer);
    }

    /*
     * Block the kthread during system suspend & resume in order to defer
     * handling of events such as DP_IRQ and hotplugs until after resume.
     */
    nvkms_read_lock_pm_lock();

    down(&nvkms_lock);

    if (timer->isRefPtr) {
        // If the object this timer refers to was destroyed, treat the timer as
        // canceled.
        dataPtr = nvkms_dec_ref(timer->dataPtr);
        if (!dataPtr) {
            timer->cancel = NV_TRUE;
        }
    } else {
        dataPtr = timer->dataPtr;
    }

    if (!timer->cancel) {
        timer->proc(dataPtr, timer->dataU32);
        timer->complete = NV_TRUE;
    }

    if (timer->isRefPtr) {
        // ref_ptr-based timers are allocated with kmalloc(GFP_ATOMIC).
        kfree(timer);
    } else if (timer->cancel) {
        nvkms_free(timer, sizeof(*timer));
    }

    up(&nvkms_lock);

    nvkms_read_unlock_pm_lock();
}

static void nvkms_queue_work(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
    int ret = nv_kthread_q_schedule_q_item(q, q_item);
    /*
     * nv_kthread_q_schedule_q_item should only fail (which it indicates by
     * returning false) if the item is already scheduled or the queue is
     * stopped. Neither of those should happen in NVKMS.
     */
    WARN_ON(!ret);
}

static void _nvkms_timer_callback_internal(struct nvkms_timer_t *nvkms_timer)
{
    /* In softirq context, so schedule nvkms_kthread_q_callback(). */
    nvkms_queue_work(&nvkms_kthread_q, &nvkms_timer->nv_kthread_q_item);
}

/*
 * Why the "inline" keyword? Because only one of these next two functions will
 * be used, thus leading to a "defined but not used function" warning. The
 * "inline" keyword is redefined in the Kbuild system
 * (see: <kernel>/include/linux/compiler-gcc.h) so as to suppress that warning.
 */
inline static void nvkms_timer_callback_typed_data(struct timer_list *timer)
{
    struct nvkms_timer_t *nvkms_timer =
            container_of(timer, struct nvkms_timer_t, kernel_timer);

    _nvkms_timer_callback_internal(nvkms_timer);
}

inline static void nvkms_timer_callback_anon_data(unsigned long arg)
{
    struct nvkms_timer_t *nvkms_timer = (struct nvkms_timer_t *) arg;
    _nvkms_timer_callback_internal(nvkms_timer);
}

static void
nvkms_init_timer(struct nvkms_timer_t *timer, nvkms_timer_proc_t *proc,
                 void *dataPtr, NvU32 dataU32, NvBool isRefPtr, NvU64 usec)
{
    unsigned long flags = 0;

    memset(timer, 0, sizeof(*timer));
    timer->cancel = NV_FALSE;
    timer->complete = NV_FALSE;
    timer->isRefPtr = isRefPtr;

    timer->proc = proc;
    timer->dataPtr = dataPtr;
    timer->dataU32 = dataU32;

    nv_kthread_q_item_init(&timer->nv_kthread_q_item, nvkms_kthread_q_callback,
                           timer);

    /*
     * After adding timer to timers_list we need to finish referencing it
     * (calling nvkms_queue_work() or mod_timer()) before releasing the lock.
     * Otherwise, if the code to free the timer were ever updated to
     * run in parallel with this, it could race against nvkms_init_timer()
     * and free the timer before its initialization is complete.
     */
    spin_lock_irqsave(&nvkms_timers.lock, flags);
    list_add(&timer->timers_list, &nvkms_timers.list);

    if (usec == 0) {
        timer->kernel_timer_created = NV_FALSE;
        nvkms_queue_work(&nvkms_kthread_q, &timer->nv_kthread_q_item);
    } else {
#if defined(NV_TIMER_SETUP_PRESENT)
        timer_setup(&timer->kernel_timer, nvkms_timer_callback_typed_data, 0);
#else
        init_timer(&timer->kernel_timer);
        timer->kernel_timer.function = nvkms_timer_callback_anon_data;
        timer->kernel_timer.data = (unsigned long) timer;
#endif

        timer->kernel_timer_created = NV_TRUE;
        mod_timer(&timer->kernel_timer, jiffies + NVKMS_USECS_TO_JIFFIES(usec));
    }
    spin_unlock_irqrestore(&nvkms_timers.lock, flags);
}

nvkms_timer_handle_t*
nvkms_alloc_timer(nvkms_timer_proc_t *proc,
                  void *dataPtr, NvU32 dataU32,
                  NvU64 usec)
{
    // nvkms_alloc_timer cannot be called from an interrupt context.
    struct nvkms_timer_t *timer = nvkms_alloc(sizeof(*timer), NV_FALSE);
    if (timer) {
        nvkms_init_timer(timer, proc, dataPtr, dataU32, NV_FALSE, usec);
    }
    return timer;
}

NvBool
nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc,
                               struct nvkms_ref_ptr *ref_ptr,
                               NvU32 dataU32, NvU64 usec)
{
    // nvkms_alloc_timer_with_ref_ptr is called from an interrupt bottom half
    // handler, which runs in a tasklet (i.e. atomic) context.
    struct nvkms_timer_t *timer = kmalloc(sizeof(*timer), GFP_ATOMIC);
    if (timer) {
        // Reference the ref_ptr to make sure that it doesn't get freed before
        // the timer fires.
        nvkms_inc_ref(ref_ptr);
        nvkms_init_timer(timer, proc, ref_ptr, dataU32, NV_TRUE, usec);
    }

    return timer != NULL;
}

void nvkms_free_timer(nvkms_timer_handle_t *handle)
{
    struct nvkms_timer_t *timer = handle;

    if (timer == NULL) {
        return;
    }

    if (timer->complete) {
        nvkms_free(timer, sizeof(*timer));
        return;
    }

    timer->cancel = NV_TRUE;
}
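
/*
 * A minimal usage sketch of the timer path, assuming a hypothetical
 * callback my_proc() that core NVKMS would supply:
 *
 *   static void my_proc(void *dataPtr, NvU32 dataU32) { ... }
 *
 *   // Runs my_proc(data, 0) on nvkms_kthread_q roughly 5 ms from now;
 *   // usec == 0 would instead queue the work item immediately.
 *   nvkms_timer_handle_t *h = nvkms_alloc_timer(my_proc, data, 0, 5000);
 *
 *   // Canceling: if the callback has not completed yet, nvkms_free_timer()
 *   // only sets timer->cancel, and nvkms_kthread_q_callback() frees it.
 *   nvkms_free_timer(h);
 */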

NvBool nvkms_fd_is_nvidia_chardev(int fd)
{
    struct file *filp = fget(fd);
    dev_t rdev = 0;
    NvBool ret = NV_FALSE;

    if (filp == NULL) {
        return ret;
    }

    if (filp->f_inode == NULL) {
        goto done;
    }
    rdev = filp->f_inode->i_rdev;

    if (MAJOR(rdev) == NVKMS_MAJOR_DEVICE_NUMBER) {
        ret = NV_TRUE;
    }

done:
    fput(filp);

    return ret;
}

NvBool nvkms_open_gpu(NvU32 gpuId)
{
    nvidia_modeset_stack_ptr stack = NULL;
    NvBool ret;

    if (__rm_ops.alloc_stack(&stack) != 0) {
        return NV_FALSE;
    }

    ret = __rm_ops.open_gpu(gpuId, stack) == 0;

    __rm_ops.free_stack(stack);

    return ret;
}

void nvkms_close_gpu(NvU32 gpuId)
{
    nvidia_modeset_stack_ptr stack = NULL;

    if (__rm_ops.alloc_stack(&stack) != 0) {
        return;
    }

    __rm_ops.close_gpu(gpuId, stack);

    __rm_ops.free_stack(stack);
}

NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info)
{
    return __rm_ops.enumerate_gpus(gpu_info);
}

NvBool nvkms_allow_write_combining(void)
{
    return __rm_ops.system_info.allow_write_combining;
}

#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
/*************************************************************************
 * Implementation of sysfs interface to control backlight
 *************************************************************************/

struct nvkms_backlight_device {
    NvU32 gpu_id;
    NvU32 display_id;

    void *drv_priv;

    struct backlight_device *dev;
};

static int nvkms_update_backlight_status(struct backlight_device *bd)
{
    struct nvkms_backlight_device *nvkms_bd = bl_get_data(bd);
    NvBool status;
    int ret;

    ret = down_interruptible(&nvkms_lock);

    if (ret != 0) {
        return ret;
    }

    status = nvKmsSetBacklight(nvkms_bd->display_id, nvkms_bd->drv_priv,
                               bd->props.brightness);

    up(&nvkms_lock);

    return status ? 0 : -EINVAL;
}

static int nvkms_get_backlight_brightness(struct backlight_device *bd)
{
    struct nvkms_backlight_device *nvkms_bd = bl_get_data(bd);
    NvU32 brightness = 0;
    NvBool status;
    int ret;

    ret = down_interruptible(&nvkms_lock);

    if (ret != 0) {
        return ret;
    }

    status = nvKmsGetBacklight(nvkms_bd->display_id, nvkms_bd->drv_priv,
                               &brightness);

    up(&nvkms_lock);

    return status ? brightness : -1;
}

static const struct backlight_ops nvkms_backlight_ops = {
    .update_status = nvkms_update_backlight_status,
    .get_brightness = nvkms_get_backlight_brightness,
};
#endif /* IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) */

struct nvkms_backlight_device*
nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv,
                         NvU32 current_brightness)
{
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
    char name[18];
    struct backlight_properties props = {
        .brightness = current_brightness,
        .max_brightness = 100,
        .type = BACKLIGHT_RAW,
    };
    nv_gpu_info_t *gpu_info = NULL;
    NvU32 gpu_count = 0;
    struct nvkms_backlight_device *nvkms_bd = NULL;
    int i;

#if defined(NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE)
    if (!acpi_video_backlight_use_native()) {
        return NULL;
    }
#endif

    gpu_info = nvkms_alloc(NV_MAX_GPUS * sizeof(*gpu_info), NV_TRUE);
    if (gpu_info == NULL) {
        return NULL;
    }

    gpu_count = __rm_ops.enumerate_gpus(gpu_info);
    if (gpu_count == 0) {
        goto done;
    }

    for (i = 0; i < gpu_count; i++) {
        if (gpu_info[i].gpu_id == gpu_id) {
            break;
        }
    }

    if (i == gpu_count) {
        goto done;
    }

    nvkms_bd = nvkms_alloc(sizeof(*nvkms_bd), NV_TRUE);
    if (nvkms_bd == NULL) {
        goto done;
    }

    snprintf(name, sizeof(name), "nvidia_%d", i);
    name[sizeof(name) - 1] = '\0';

    nvkms_bd->gpu_id = gpu_id;
    nvkms_bd->display_id = display_id;
    nvkms_bd->drv_priv = drv_priv;

    nvkms_bd->dev =
        backlight_device_register(name,
                                  gpu_info[i].os_device_ptr,
                                  nvkms_bd,
                                  &nvkms_backlight_ops,
                                  &props);

done:
    nvkms_free(gpu_info, NV_MAX_GPUS * sizeof(*gpu_info));

    return nvkms_bd;
#else
    return NULL;
#endif /* IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) */
}

void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd)
{
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
    if (nvkms_bd->dev) {
        backlight_device_unregister(nvkms_bd->dev);
    }

    nvkms_free(nvkms_bd, sizeof(*nvkms_bd));
#endif /* IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) */
}

/*************************************************************************
 * Common to both user-space and kapi NVKMS interfaces
 *************************************************************************/

static void nvkms_kapi_event_kthread_q_callback(void *arg)
{
    struct NvKmsKapiDevice *device = arg;

    nvKmsKapiHandleEventQueueChange(device);
}

struct nvkms_per_open *nvkms_open_common(enum NvKmsClientType type,
                                         struct NvKmsKapiDevice *device,
                                         int *status)
{
    struct nvkms_per_open *popen = NULL;

    popen = nvkms_alloc(sizeof(*popen), NV_TRUE);

    if (popen == NULL) {
        *status = -ENOMEM;
        goto failed;
    }

    popen->type = type;

    *status = down_interruptible(&nvkms_lock);

    if (*status != 0) {
        goto failed;
    }

    popen->data = nvKmsOpen(current->tgid, type, popen);

    up(&nvkms_lock);

    if (popen->data == NULL) {
        *status = -EPERM;
        goto failed;
    }

    switch (popen->type) {
        case NVKMS_CLIENT_USER_SPACE:
            init_waitqueue_head(&popen->u.user.events.wait_queue);
            break;
        case NVKMS_CLIENT_KERNEL_SPACE:
            nv_kthread_q_item_init(&popen->u.kernel.events.nv_kthread_q_item,
                                   nvkms_kapi_event_kthread_q_callback,
                                   device);
            break;
    }

    *status = 0;

    return popen;

failed:

    nvkms_free(popen, sizeof(*popen));

    return NULL;
}

void nvkms_close_pm_locked(struct nvkms_per_open *popen)
{
    /*
     * Don't use down_interruptible(): we need to free resources
     * during close, so we have no choice but to wait to take the
     * mutex.
     */

    down(&nvkms_lock);

    nvKmsClose(popen->data);

    popen->data = NULL;

    up(&nvkms_lock);

    if (popen->type == NVKMS_CLIENT_KERNEL_SPACE) {
        /*
         * Flush any outstanding nvkms_kapi_event_kthread_q_callback() work
         * items before freeing popen.
         *
         * Note that this must be done after the above nvKmsClose() call, to
         * guarantee that no more nvkms_kapi_event_kthread_q_callback() work
         * items get scheduled.
         *
         * Also, note that though popen->data is freed above, any subsequent
         * nvkms_kapi_event_kthread_q_callback()'s for this popen should be
         * safe: if any nvkms_kapi_event_kthread_q_callback()-initiated work
         * attempts to call back into NVKMS, the popen->data==NULL check in
         * nvkms_ioctl_common() should reject the request.
         */

        nv_kthread_q_flush(&nvkms_kthread_q);
    }

    nvkms_free(popen, sizeof(*popen));
}

static void nvkms_close_pm_unlocked(void *data)
{
    struct nvkms_per_open *popen = data;

    nvkms_read_lock_pm_lock();

    nvkms_close_pm_locked(popen);

    nvkms_read_unlock_pm_lock();
}

static void nvkms_close_popen(struct nvkms_per_open *popen)
{
    if (nvkms_read_trylock_pm_lock() == 0) {
        nvkms_close_pm_locked(popen);
        nvkms_read_unlock_pm_lock();
    } else {
        nv_kthread_q_item_init(&popen->deferred_close_q_item,
                               nvkms_close_pm_unlocked,
                               popen);
        nvkms_queue_work(&nvkms_deferred_close_kthread_q,
                         &popen->deferred_close_q_item);
    }
}

int nvkms_ioctl_common
(
    struct nvkms_per_open *popen,
    NvU32 cmd, NvU64 address, const size_t size
)
{
    int status;
    NvBool ret;

    status = down_interruptible(&nvkms_lock);
    if (status != 0) {
        return status;
    }

    if (popen->data != NULL) {
        ret = nvKmsIoctl(popen->data, cmd, address, size);
    } else {
        ret = NV_FALSE;
    }

    up(&nvkms_lock);

    return ret ? 0 : -EPERM;
}
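
/*
 * Both entry points funnel into nvkms_ioctl_common(): the user-space path
 * (nvkms_ioctl(), below) and the kernel KAPI path (nvkms_ioctl_from_kapi()).
 * Each takes the PM read lock first, so core NVKMS never processes a
 * request concurrently with the suspend/resume write-lock holder.
 */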

/*************************************************************************
 * NVKMS interface for kernel space NVKMS clients like KAPI
 *************************************************************************/

struct nvkms_per_open* nvkms_open_from_kapi
(
    struct NvKmsKapiDevice *device
)
{
    int status = 0;
    struct nvkms_per_open *ret;

    nvkms_read_lock_pm_lock();
    ret = nvkms_open_common(NVKMS_CLIENT_KERNEL_SPACE, device, &status);
    nvkms_read_unlock_pm_lock();

    return ret;
}

void nvkms_close_from_kapi(struct nvkms_per_open *popen)
{
    nvkms_close_pm_unlocked(popen);
}

NvBool nvkms_ioctl_from_kapi
(
    struct nvkms_per_open *popen,
    NvU32 cmd, void *params_address, const size_t param_size
)
{
    NvBool ret;

    nvkms_read_lock_pm_lock();
    ret = nvkms_ioctl_common(popen,
                             cmd,
                             (NvU64)(NvUPtr)params_address, param_size) == 0;
    nvkms_read_unlock_pm_lock();

    return ret;
}

/*************************************************************************
 * APIs for locking.
 *************************************************************************/

struct nvkms_sema_t {
    struct semaphore os_sema;
};

nvkms_sema_handle_t* nvkms_sema_alloc(void)
{
    nvkms_sema_handle_t *sema = nvkms_alloc(sizeof(*sema), NV_TRUE);

    if (sema != NULL) {
        sema_init(&sema->os_sema, 1);
    }

    return sema;
}

void nvkms_sema_free(nvkms_sema_handle_t *sema)
{
    nvkms_free(sema, sizeof(*sema));
}

void nvkms_sema_down(nvkms_sema_handle_t *sema)
{
    down(&sema->os_sema);
}

void nvkms_sema_up(nvkms_sema_handle_t *sema)
{
    up(&sema->os_sema);
}

/*************************************************************************
 * Procfs files support code.
 *************************************************************************/

#if defined(CONFIG_PROC_FS)

#define NV_DEFINE_SINGLE_NVKMS_PROCFS_FILE(name) \
    NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nvkms_pm_lock)

#define NVKMS_PROCFS_FOLDER "driver/nvidia-modeset"

struct proc_dir_entry *nvkms_proc_dir;

static void nv_procfs_out_string(void *data, const char *str)
{
    struct seq_file *s = data;

    seq_puts(s, str);
}

static int nv_procfs_read_nvkms_proc(struct seq_file *s, void *arg)
{
    char *buffer;
    nvkms_procfs_proc_t *func;

#define NVKMS_PROCFS_STRING_SIZE 8192

    func = s->private;
    if (func == NULL) {
        return 0;
    }

    buffer = nvkms_alloc(NVKMS_PROCFS_STRING_SIZE, NV_TRUE);

    if (buffer != NULL) {
        int status = down_interruptible(&nvkms_lock);

        if (status != 0) {
            nvkms_free(buffer, NVKMS_PROCFS_STRING_SIZE);
            return status;
        }

        func(s, buffer, NVKMS_PROCFS_STRING_SIZE, &nv_procfs_out_string);

        up(&nvkms_lock);

        nvkms_free(buffer, NVKMS_PROCFS_STRING_SIZE);
    }

    return 0;
}

NV_DEFINE_SINGLE_NVKMS_PROCFS_FILE(nvkms_proc);

static NvBool
nvkms_add_proc_file(const nvkms_procfs_file_t *file)
{
    struct proc_dir_entry *new_proc_dir;

    if (nvkms_proc_dir == NULL) {
        return NV_FALSE;
    }

    new_proc_dir = proc_create_data(file->name, 0, nvkms_proc_dir,
                                    &nv_procfs_nvkms_proc_fops, file->func);
    return (new_proc_dir != NULL);
}

#endif /* defined(CONFIG_PROC_FS) */

static void nvkms_proc_init(void)
{
#if defined(CONFIG_PROC_FS)
    const nvkms_procfs_file_t *file;

    nvkms_proc_dir = NULL;
    nvKmsGetProcFiles(&file);

    if (file == NULL || file->name == NULL) {
        return;
    }

    nvkms_proc_dir = NV_CREATE_PROC_DIR(NVKMS_PROCFS_FOLDER, NULL);
    if (nvkms_proc_dir == NULL) {
        return;
    }

    while (file->name != NULL) {
        if (!nvkms_add_proc_file(file)) {
            nvkms_log(NVKMS_LOG_LEVEL_WARN, NVKMS_LOG_PREFIX,
                      "Failed to create proc file");
            break;
        }
        file++;
    }
#endif
}

static void nvkms_proc_exit(void)
{
#if defined(CONFIG_PROC_FS)
    if (nvkms_proc_dir == NULL) {
        return;
    }

    proc_remove(nvkms_proc_dir);
#endif /* CONFIG_PROC_FS */
}

/*************************************************************************
 * NVKMS Config File Read
 ************************************************************************/
#if NVKMS_CONFIG_FILE_SUPPORTED
static NvBool nvkms_fs_mounted(void)
{
    return current->fs != NULL;
}

static size_t nvkms_config_file_open
(
    char *fname,
    char ** const buff
)
{
    int i = 0;
    struct file *file;
    struct inode *file_inode;
    size_t file_size = 0;
    size_t read_size = 0;
#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
    loff_t pos = 0;
#endif

    if (!nvkms_fs_mounted()) {
        printk(KERN_ERR NVKMS_LOG_PREFIX "ERROR: Filesystems not mounted\n");
        return 0;
    }

    file = filp_open(fname, O_RDONLY, 0);
    if (file == NULL || IS_ERR(file)) {
        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Failed to open %s\n",
               fname);
        return 0;
    }

    file_inode = file->f_inode;
    if (file_inode == NULL || IS_ERR(file_inode)) {
        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Inode is invalid\n");
        goto done;
    }
    file_size = file_inode->i_size;
    if (file_size > NVKMS_READ_FILE_MAX_SIZE) {
        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: File exceeds maximum size\n");
        goto done;
    }

    *buff = nvkms_alloc(file_size, NV_FALSE);
    if (*buff == NULL) {
        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Out of memory\n");
        goto done;
    }

    /*
     * TODO: Once we have access to GPL symbols, this can be replaced with
     * kernel_read_file for kernels >= 4.6
     */
    while ((read_size < file_size) && (i++ < NVKMS_READ_FILE_MAX_LOOPS)) {
#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
        ssize_t ret = kernel_read(file, *buff + read_size,
                                  file_size - read_size, &pos);
#else
        ssize_t ret = kernel_read(file, read_size,
                                  *buff + read_size,
                                  file_size - read_size);
#endif
        if (ret <= 0) {
            break;
        }
        read_size += ret;
    }

    if (read_size != file_size) {
        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Failed to read %s\n",
               fname);
        goto done;
    }

    filp_close(file, current->files);
    return file_size;

done:
    nvkms_free(*buff, file_size);
    filp_close(file, current->files);
    return 0;
}

/* Must be called with nvkms_lock held. */
static void nvkms_read_config_file_locked(void)
{
    char *buffer = NULL;
    size_t buf_size = 0;

    /* Only read the config file if the kernel parameter is set. */
    if (!NVKMS_CONF_FILE_SPECIFIED) {
        return;
    }

    buf_size = nvkms_config_file_open(nvkms_conf, &buffer);

    if (buf_size == 0) {
        return;
    }

    if (nvKmsReadConf(buffer, buf_size, nvkms_config_file_open)) {
        printk(KERN_INFO NVKMS_LOG_PREFIX "Successfully read %s\n",
               nvkms_conf);
    }

    nvkms_free(buffer, buf_size);
}
#else
static void nvkms_read_config_file_locked(void)
{
}
#endif

/*************************************************************************
 * NVKMS KAPI functions
 ************************************************************************/

NvBool nvKmsKapiGetFunctionsTable
(
    struct NvKmsKapiFunctionsTable *funcsTable
)
{
    return nvKmsKapiGetFunctionsTableInternal(funcsTable);
}
EXPORT_SYMBOL(nvKmsKapiGetFunctionsTable);

/*************************************************************************
 * File operation callback functions.
 *************************************************************************/

static int nvkms_open(struct inode *inode, struct file *filp)
{
    int status;

    status = nv_down_read_interruptible(&nvkms_pm_lock);
    if (status != 0) {
        return status;
    }

    filp->private_data =
        nvkms_open_common(NVKMS_CLIENT_USER_SPACE, NULL, &status);

    nvkms_read_unlock_pm_lock();

    return status;
}

static int nvkms_close(struct inode *inode, struct file *filp)
{
    struct nvkms_per_open *popen = filp->private_data;

    if (popen == NULL) {
        return -EINVAL;
    }

    nvkms_close_popen(popen);
    return 0;
}

static int nvkms_mmap(struct file *filp, struct vm_area_struct *vma)
{
    return -EPERM;
}

static int nvkms_ioctl(struct inode *inode, struct file *filp,
                       unsigned int cmd, unsigned long arg)
{
    size_t size;
    unsigned int nr;
    int status;
    struct NvKmsIoctlParams params;
    struct nvkms_per_open *popen = filp->private_data;

    if ((popen == NULL) || (popen->data == NULL)) {
        return -EINVAL;
    }

    size = _IOC_SIZE(cmd);
    nr = _IOC_NR(cmd);

    /* The only supported ioctl is NVKMS_IOCTL_CMD. */

    if ((nr != NVKMS_IOCTL_CMD) || (size != sizeof(struct NvKmsIoctlParams))) {
        return -ENOTTY;
    }

    status = copy_from_user(&params, (void *) arg, size);
    if (status != 0) {
        return -EFAULT;
    }

    status = nv_down_read_interruptible(&nvkms_pm_lock);
    if (status != 0) {
        return status;
    }

    status = nvkms_ioctl_common(popen,
                                params.cmd,
                                params.address,
                                params.size);

    nvkms_read_unlock_pm_lock();

    return status;
}

static long nvkms_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                 unsigned long arg)
{
    return nvkms_ioctl(filp->f_inode, filp, cmd, arg);
}
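
/*
 * From user space, every request is a single ioctl carrying a
 * struct NvKmsIoctlParams that points at the real command payload.  A
 * sketch, for illustration only (the exact request macro lives in
 * nvkms-ioctl.h; the device path and NVKMS_IOCTL_IOWR name here are
 * assumptions, not part of this file):
 *
 *   int fd = open("/dev/nvidia-modeset", O_RDWR);
 *   struct NvKmsIoctlParams params = {
 *       .cmd     = cmd,                       // NVKMS command number
 *       .address = (NvU64)(NvUPtr)&cmdParams, // pointer to command struct
 *       .size    = sizeof(cmdParams),
 *   };
 *   ioctl(fd, NVKMS_IOCTL_IOWR, &params);
 */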

static unsigned int nvkms_poll(struct file *filp, poll_table *wait)
{
    unsigned int mask = 0;
    struct nvkms_per_open *popen = filp->private_data;

    if ((popen == NULL) || (popen->data == NULL)) {
        return mask;
    }

    BUG_ON(popen->type != NVKMS_CLIENT_USER_SPACE);

    if ((filp->f_flags & O_NONBLOCK) == 0) {
        poll_wait(filp, &popen->u.user.events.wait_queue, wait);
    }

    if (atomic_read(&popen->u.user.events.available)) {
        mask = POLLPRI | POLLIN;
    }

    return mask;
}


/*************************************************************************
 * Module loading support code.
 *************************************************************************/

#define NVKMS_RDEV  (MKDEV(NV_MAJOR_DEVICE_NUMBER, \
                           NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE))

static struct file_operations nvkms_fops = {
    .owner       = THIS_MODULE,
    .poll        = nvkms_poll,
    .unlocked_ioctl = nvkms_unlocked_ioctl,
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
    .compat_ioctl = nvkms_unlocked_ioctl,
#endif
    .mmap        = nvkms_mmap,
    .open        = nvkms_open,
    .release     = nvkms_close,
};

static struct cdev nvkms_device_cdev;

static int __init nvkms_register_chrdev(void)
{
    int ret;

    ret = register_chrdev_region(NVKMS_RDEV, 1, "nvidia-modeset");
    if (ret < 0) {
        return ret;
    }

    cdev_init(&nvkms_device_cdev, &nvkms_fops);
    ret = cdev_add(&nvkms_device_cdev, NVKMS_RDEV, 1);
    if (ret < 0) {
        unregister_chrdev_region(NVKMS_RDEV, 1);
        return ret;
    }

    return ret;
}

static void nvkms_unregister_chrdev(void)
{
    cdev_del(&nvkms_device_cdev);
    unregister_chrdev_region(NVKMS_RDEV, 1);
}

void* nvkms_get_per_open_data(int fd)
{
    struct file *filp = fget(fd);
    void *data = NULL;

    if (filp) {
        if (filp->f_op == &nvkms_fops && filp->private_data) {
            struct nvkms_per_open *popen = filp->private_data;
            data = popen->data;
        }

        /*
         * fget() incremented the struct file's reference count, which needs to
         * be balanced with a call to fput().  It is safe to decrement the
         * reference count before returning filp->private_data because core
         * NVKMS is currently holding the nvkms_lock, which prevents the
         * nvkms_close() => nvKmsClose() call chain from freeing the file out
         * from under the caller of nvkms_get_per_open_data().
         */
        fput(filp);
    }

    return data;
}

static int __init nvkms_init(void)
{
    int ret;

    atomic_set(&nvkms_alloc_called_count, 0);

    ret = nvkms_alloc_rm();

    if (ret != 0) {
        return ret;
    }

    sema_init(&nvkms_lock, 1);
    init_rwsem(&nvkms_pm_lock);

    ret = nv_kthread_q_init(&nvkms_kthread_q,
                            "nvidia-modeset/kthread_q");
    if (ret != 0) {
        goto fail_kthread;
    }

    ret = nv_kthread_q_init(&nvkms_deferred_close_kthread_q,
                            "nvidia-modeset/deferred_close_kthread_q");
    if (ret != 0) {
        goto fail_deferred_close_kthread;
    }

    INIT_LIST_HEAD(&nvkms_timers.list);
    spin_lock_init(&nvkms_timers.lock);

    ret = nvkms_register_chrdev();
    if (ret != 0) {
        goto fail_register_chrdev;
    }

    down(&nvkms_lock);
    if (!nvKmsModuleLoad()) {
        ret = -ENOMEM;
    }
    if (ret != 0) {
        up(&nvkms_lock);
        goto fail_module_load;
    }
    nvkms_read_config_file_locked();
    up(&nvkms_lock);

    nvkms_proc_init();

    return 0;

fail_module_load:
    nvkms_unregister_chrdev();
fail_register_chrdev:
    nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
fail_deferred_close_kthread:
    nv_kthread_q_stop(&nvkms_kthread_q);
fail_kthread:
    nvkms_free_rm();

    return ret;
}

static void __exit nvkms_exit(void)
{
    struct nvkms_timer_t *timer, *tmp_timer;
    unsigned long flags = 0;

    nvkms_proc_exit();

    down(&nvkms_lock);
    nvKmsModuleUnload();
    up(&nvkms_lock);

    /*
     * At this point, any pending tasks should be marked canceled, but
     * we still need to drain them, so that nvkms_kthread_q_callback() doesn't
     * get called after the module is unloaded.
     */
restart:
    spin_lock_irqsave(&nvkms_timers.lock, flags);

    list_for_each_entry_safe(timer, tmp_timer, &nvkms_timers.list, timers_list) {
        if (timer->kernel_timer_created) {
            /*
             * Delete each pending timer and check whether it was already
             * executing (del_timer_sync() returns 0) or was deactivated
             * before execution (returns 1).  If it began execution, the
             * kthread_q callback will wait for timer completion, and we
             * wait for queue completion with nv_kthread_q_stop below.
             */
            if (del_timer_sync(&timer->kernel_timer) == 1) {
                /* We deactivated the timer, so we need to clean up after it. */
                list_del(&timer->timers_list);

                /*
                 * Unlock the spinlock before freeing the memory, since
                 * freeing may sleep.
                 */
                spin_unlock_irqrestore(&nvkms_timers.lock, flags);

                if (timer->isRefPtr) {
                    nvkms_dec_ref(timer->dataPtr);
                    kfree(timer);
                } else {
                    nvkms_free(timer, sizeof(*timer));
                }

                /* The list could have changed while we were freeing memory. */
                goto restart;
            }
        }
    }

    spin_unlock_irqrestore(&nvkms_timers.lock, flags);

    nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
    nv_kthread_q_stop(&nvkms_kthread_q);

    nvkms_unregister_chrdev();
    nvkms_free_rm();

    if (malloc_verbose) {
        printk(KERN_INFO NVKMS_LOG_PREFIX "Total allocations: %d\n",
               atomic_read(&nvkms_alloc_called_count));
    }
}

module_init(nvkms_init);
module_exit(nvkms_exit);

MODULE_LICENSE("Dual MIT/GPL");

MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);