/*
 * SPDX-FileCopyrightText: Copyright (c) 2015-21 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <asm/div64.h> /* do_div() */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/random.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/freezer.h>

#include <acpi/video.h>

#include "nvstatus.h"

#include "nv-register-module.h"
#include "nv-modeset-interface.h"
#include "nv-kref.h"

#include "nvidia-modeset-os-interface.h"
#include "nvkms.h"
#include "nvkms-ioctl.h"

#include "conftest.h"
#include "nv-procfs.h"
#include "nv-kthread-q.h"
#include "nv-time.h"
#include "nv-lock.h"

#if !defined(CONFIG_RETPOLINE)
#include "nv-retpoline.h"
#endif

#include <linux/backlight.h>

#define NVKMS_LOG_PREFIX "nvidia-modeset: "

static bool output_rounding_fix = true;
module_param_named(output_rounding_fix, output_rounding_fix, bool, 0400);

static bool disable_vrr_memclk_switch = false;
module_param_named(disable_vrr_memclk_switch, disable_vrr_memclk_switch, bool, 0400);

/* These parameters are used for fault injection tests.  Normally the defaults
 * should be used. */
MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc");
static int fail_malloc_num = -1;
module_param_named(fail_malloc, fail_malloc_num, int, 0400);

MODULE_PARM_DESC(malloc_verbose, "Report information about malloc calls on module unload");
static bool malloc_verbose = false;
module_param_named(malloc_verbose, malloc_verbose, bool, 0400);

/* This parameter is used to find the dpy override conf file */
#define NVKMS_CONF_FILE_SPECIFIED (nvkms_conf != NULL)

MODULE_PARM_DESC(config_file,
                 "Path to the nvidia-modeset configuration file "
                 "(default: disabled)");
static char *nvkms_conf = NULL;
module_param_named(config_file, nvkms_conf, charp, 0400);

static atomic_t nvkms_alloc_called_count;

NvBool nvkms_output_rounding_fix(void)
{
    return output_rounding_fix;
}

NvBool nvkms_disable_vrr_memclk_switch(void)
{
    return disable_vrr_memclk_switch;
}
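
/*
 * These module parameters are read once at load time (permissions 0400),
 * so they must be supplied when the module is loaded.  An illustrative
 * (not prescriptive) invocation:
 *
 *   modprobe nvidia-modeset fail_malloc=3 malloc_verbose=1
 *
 * or, equivalently, on the kernel command line:
 *
 *   nvidia-modeset.malloc_verbose=1
 */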

#define NVKMS_SYNCPT_STUBS_NEEDED

/*************************************************************************
 * NVKMS interface to the nvhost unit's sync point APIs.
 *************************************************************************/

#ifdef NVKMS_SYNCPT_STUBS_NEEDED
/* Stub for the unsupported nvkms_syncpt APIs; always reports failure. */
NvBool nvkms_syncpt_op(
    enum NvKmsSyncPtOp op,
    NvKmsSyncPtOpParams *params)
{
    return NV_FALSE;
}
#endif

#define NVKMS_MAJOR_DEVICE_NUMBER 195
#define NVKMS_MINOR_DEVICE_NUMBER 254

/*
 * Convert from microseconds to jiffies.  The conversion is:
 * ((usec) * HZ / 1000000)
 *
 * Use do_div() to avoid gcc-generated references to __udivdi3().
 * Note that the do_div() macro divides the first argument in place.
 */
static inline unsigned long NVKMS_USECS_TO_JIFFIES(NvU64 usec)
{
    unsigned long result = usec * HZ;
    do_div(result, 1000000);
    return result;
}
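
/*
 * A worked example of the conversion above, assuming HZ=250 (a 4 ms
 * jiffy): usec=16667 gives result = 16667 * 250 = 4166750, and
 * do_div(result, 1000000) leaves result = 4, i.e. 4 jiffies (~16 ms).
 */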


/*************************************************************************
 * NVKMS uses a global lock, nvkms_lock.  The lock is taken in the
 * file operation callback functions when calling into core NVKMS.
 *************************************************************************/

static struct semaphore nvkms_lock;

/*************************************************************************
 * User clients of NVKMS may need to be synchronized with suspend/resume
 * operations.  This depends on the state of the system when the NVKMS
 * suspend/resume callbacks are invoked.  NVKMS uses a single
 * RW lock, nvkms_pm_lock, for this synchronization.
 *************************************************************************/

static struct rw_semaphore nvkms_pm_lock;

/*************************************************************************
 * NVKMS executes almost all of its queued work items on a single
 * kthread.  The exception is deferred close() handlers, which typically
 * block for long periods of time and would stall the shared queue, so
 * they run on a dedicated queue.
 *************************************************************************/

static struct nv_kthread_q nvkms_kthread_q;
static struct nv_kthread_q nvkms_deferred_close_kthread_q;

/*************************************************************************
 * The nvkms_per_open structure tracks data that is specific to a
 * single open.
 *************************************************************************/

struct nvkms_per_open {
    void *data;

    enum NvKmsClientType type;

    union {
        struct {
            struct {
                atomic_t available;
                wait_queue_head_t wait_queue;
            } events;
        } user;

        struct {
            struct {
                nv_kthread_q_item_t nv_kthread_q_item;
            } events;
        } kernel;
    } u;

    nv_kthread_q_item_t deferred_close_q_item;
};

/*************************************************************************
 * nvkms_pm_lock helper functions.  Since the kernel provides no
 * down_read_interruptible() or equivalent interface, approximate it
 * with down_read_trylock(), so that the kernel's freezer can round up
 * user threads that are heading into suspend.
 *************************************************************************/

/* Returns 0 if the read lock was acquired, non-zero otherwise. */
static inline int nvkms_read_trylock_pm_lock(void)
{
    return !down_read_trylock(&nvkms_pm_lock);
}

static inline void nvkms_read_lock_pm_lock(void)
{
    while (!down_read_trylock(&nvkms_pm_lock)) {
        try_to_freeze();
        cond_resched();
    }
}

static inline void nvkms_read_unlock_pm_lock(void)
{
    up_read(&nvkms_pm_lock);
}

static inline void nvkms_write_lock_pm_lock(void)
{
    down_write(&nvkms_pm_lock);
}

static inline void nvkms_write_unlock_pm_lock(void)
{
    up_write(&nvkms_pm_lock);
}
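
/*
 * An illustrative sketch (not compiled) of how the read-side helpers pair
 * up in a user-thread entry point.  The real callers below use
 * nv_down_read_interruptible() where a failable acquire is acceptable;
 * this function is hypothetical.
 */
#if 0
static void example_entry_point(void)
{
    nvkms_read_lock_pm_lock();    /* spins on trylock; freezer-friendly */
    /* ... call into core NVKMS ... */
    nvkms_read_unlock_pm_lock();
}
#endif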

/*************************************************************************
 * nvidia-modeset-os-interface.h functions.  It is assumed that these
 * are called while nvkms_lock is held.
 *************************************************************************/

/* Don't use kmalloc for allocations larger than one page */
#define KMALLOC_LIMIT PAGE_SIZE

void* nvkms_alloc(size_t size, NvBool zero)
{
    void *p;

    if (malloc_verbose || fail_malloc_num >= 0) {
        int this_alloc = atomic_inc_return(&nvkms_alloc_called_count) - 1;
        if (fail_malloc_num >= 0 && fail_malloc_num == this_alloc) {
            printk(KERN_WARNING NVKMS_LOG_PREFIX "Failing alloc %d\n",
                   fail_malloc_num);
            return NULL;
        }
    }

    if (size <= KMALLOC_LIMIT) {
        p = kmalloc(size, GFP_KERNEL);
    } else {
        p = vmalloc(size);
    }

    if (zero && (p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}

void nvkms_free(void *ptr, size_t size)
{
    if (size <= KMALLOC_LIMIT) {
        kfree(ptr);
    } else {
        vfree(ptr);
    }
}
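
/*
 * Because nvkms_free() uses the size to decide between kfree() and
 * vfree(), every allocation must be released with the same size it was
 * allocated with.  A minimal sketch (not compiled; struct foo is
 * hypothetical):
 */
#if 0
static void example_alloc_usage(void)
{
    struct foo *p = nvkms_alloc(sizeof(*p), NV_TRUE);  /* zeroed */
    if (p != NULL) {
        /* ... use p ... */
        nvkms_free(p, sizeof(*p));  /* size must match the allocation */
    }
}
#endif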

void* nvkms_memset(void *ptr, NvU8 c, size_t size)
{
    return memset(ptr, c, size);
}

void* nvkms_memcpy(void *dest, const void *src, size_t n)
{
    return memcpy(dest, src, n);
}

void* nvkms_memmove(void *dest, const void *src, size_t n)
{
    return memmove(dest, src, n);
}

int nvkms_memcmp(const void *s1, const void *s2, size_t n)
{
    return memcmp(s1, s2, n);
}

size_t nvkms_strlen(const char *s)
{
    return strlen(s);
}

int nvkms_strcmp(const char *s1, const char *s2)
{
    return strcmp(s1, s2);
}

char* nvkms_strncpy(char *dest, const char *src, size_t n)
{
    return strncpy(dest, src, n);
}

void nvkms_usleep(NvU64 usec)
{
    if (usec < 1000) {
        /*
         * If the period to wait is less than one millisecond, sleep
         * using udelay(); note this is a busy wait.
         */
        udelay(usec);
    } else {
        /*
         * Otherwise, sleep with millisecond precision.  Bound the
         * time to ~4 seconds (0xFFF msec => 4.095 seconds); note that
         * masking with 0xFFF truncates rather than saturates.
         *
         * Note that the do_div() macro divides the first argument in
         * place.
         */

        int msec;
        NvU64 tmp = usec + 500;
        do_div(tmp, 1000);
        msec = (int) (tmp & 0xFFF);

        /*
         * XXX NVKMS TODO: this may need to be msleep_interruptible(),
         * though the callers would need to be made to handle
         * returning early.
         */
        msleep(msec);
    }
}

NvU64 nvkms_get_usec(void)
{
    struct timespec64 ts;
    NvU64 ns;

    ktime_get_real_ts64(&ts);

    ns = timespec64_to_ns(&ts);
    return ns / 1000;
}

int nvkms_copyin(void *kptr, NvU64 uaddr, size_t n)
{
    if (!nvKmsNvU64AddressIsSafe(uaddr)) {
        return -EINVAL;
    }

    if (copy_from_user(kptr, nvKmsNvU64ToPointer(uaddr), n) != 0) {
        return -EFAULT;
    }

    return 0;
}

int nvkms_copyout(NvU64 uaddr, const void *kptr, size_t n)
{
    if (!nvKmsNvU64AddressIsSafe(uaddr)) {
        return -EINVAL;
    }

    if (copy_to_user(nvKmsNvU64ToPointer(uaddr), kptr, n) != 0) {
        return -EFAULT;
    }

    return 0;
}
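
/*
 * These helpers move ioctl parameter structures across the user/kernel
 * boundary.  A sketch (not compiled) of the typical round trip; the real
 * path is nvkms_ioctl() => nvkms_ioctl_common() below, and
 * struct ExampleParams is hypothetical:
 */
#if 0
static int example_param_round_trip(NvU64 uaddr)
{
    struct ExampleParams params;
    int status;

    status = nvkms_copyin(&params, uaddr, sizeof(params));
    if (status != 0) {
        return status;  /* -EINVAL for an unsafe address, else -EFAULT */
    }
    /* ... operate on params ... */
    return nvkms_copyout(uaddr, &params, sizeof(params));
}
#endif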

void nvkms_yield(void)
{
    schedule();
}

void nvkms_dump_stack(void)
{
    dump_stack();
}

int nvkms_snprintf(char *str, size_t size, const char *format, ...)
{
    int ret;
    va_list ap;

    va_start(ap, format);
    ret = vsnprintf(str, size, format, ap);
    va_end(ap);

    return ret;
}

int nvkms_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
    return vsnprintf(str, size, format, ap);
}

void nvkms_log(const int level, const char *gpuPrefix, const char *msg)
{
    const char *levelString;
    const char *levelPrefix;

    switch (level) {
    default:
    case NVKMS_LOG_LEVEL_INFO:
        levelPrefix = "";
        levelString = KERN_INFO;
        break;
    case NVKMS_LOG_LEVEL_WARN:
        levelPrefix = "WARNING: ";
        levelString = KERN_WARNING;
        break;
    case NVKMS_LOG_LEVEL_ERROR:
        levelPrefix = "ERROR: ";
        levelString = KERN_ERR;
        break;
    }

    printk("%s%s%s%s%s\n",
           levelString, NVKMS_LOG_PREFIX, levelPrefix, gpuPrefix, msg);
}

void
nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel,
                          NvBool eventsAvailable)
{
    struct nvkms_per_open *popen = pOpenKernel;

    switch (popen->type) {
        case NVKMS_CLIENT_USER_SPACE:
            /*
             * Write popen->events.available atomically, to avoid any races or
             * memory barrier issues interacting with nvkms_poll().
             */
            atomic_set(&popen->u.user.events.available, eventsAvailable);

            wake_up_interruptible(&popen->u.user.events.wait_queue);

            break;
        case NVKMS_CLIENT_KERNEL_SPACE:
            if (eventsAvailable) {
                nv_kthread_q_schedule_q_item(
                    &nvkms_kthread_q,
                    &popen->u.kernel.events.nv_kthread_q_item);
            }

            break;
    }
}

static void nvkms_suspend(NvU32 gpuId)
{
    if (gpuId == 0) {
        nvkms_write_lock_pm_lock();
    }

    down(&nvkms_lock);
    nvKmsSuspend(gpuId);
    up(&nvkms_lock);
}

static void nvkms_resume(NvU32 gpuId)
{
    down(&nvkms_lock);
    nvKmsResume(gpuId);
    up(&nvkms_lock);

    if (gpuId == 0) {
        nvkms_write_unlock_pm_lock();
    }
}


/*************************************************************************
 * Interface with resman.
 *************************************************************************/

static nvidia_modeset_rm_ops_t __rm_ops = { 0 };
static nvidia_modeset_callbacks_t nvkms_rm_callbacks = {
    .suspend = nvkms_suspend,
    .resume  = nvkms_resume
};

static int nvkms_alloc_rm(void)
{
    NV_STATUS nvstatus;
    int ret;

    __rm_ops.version_string = NV_VERSION_STRING;

    nvstatus = nvidia_get_rm_ops(&__rm_ops);

    if (nvstatus != NV_OK) {
        printk(KERN_ERR NVKMS_LOG_PREFIX "Version mismatch: "
               "nvidia.ko(%s) nvidia-modeset.ko(%s)\n",
               __rm_ops.version_string, NV_VERSION_STRING);
        return -EINVAL;
    }

    ret = __rm_ops.set_callbacks(&nvkms_rm_callbacks);
    if (ret < 0) {
        printk(KERN_ERR NVKMS_LOG_PREFIX "Failed to register callbacks\n");
        return ret;
    }

    return 0;
}

static void nvkms_free_rm(void)
{
    __rm_ops.set_callbacks(NULL);
}

void nvkms_call_rm(void *ops)
{
    nvidia_modeset_stack_ptr stack = NULL;

    if (__rm_ops.alloc_stack(&stack) != 0) {
        return;
    }

    __rm_ops.op(stack, ops);

    __rm_ops.free_stack(stack);
}

/*************************************************************************
 * ref_ptr implementation.
 *************************************************************************/

struct nvkms_ref_ptr {
    nv_kref_t refcnt;
    // Access to ptr is guarded by the nvkms_lock.
    void *ptr;
};

struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr)
{
    struct nvkms_ref_ptr *ref_ptr = nvkms_alloc(sizeof(*ref_ptr), NV_FALSE);
    if (ref_ptr) {
        // The ref_ptr owner counts as a reference on the ref_ptr itself.
        nv_kref_init(&ref_ptr->refcnt);
        ref_ptr->ptr = ptr;
    }
    return ref_ptr;
}

void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr)
{
    if (ref_ptr) {
        ref_ptr->ptr = NULL;
        // Release the owner's reference of the ref_ptr.
        nvkms_dec_ref(ref_ptr);
    }
}

void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr)
{
    nv_kref_get(&ref_ptr->refcnt);
}

static void ref_ptr_free(nv_kref_t *ref)
{
    struct nvkms_ref_ptr *ref_ptr = container_of(ref, struct nvkms_ref_ptr,
                                                 refcnt);
    nvkms_free(ref_ptr, sizeof(*ref_ptr));
}

void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr)
{
    void *ptr = ref_ptr->ptr;
    nv_kref_put(&ref_ptr->refcnt, ref_ptr_free);
    return ptr;
}
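
/*
 * Lifecycle sketch (not compiled): the owner wraps an object in a
 * ref_ptr, asynchronous consumers take references, and the owner severs
 * the link on teardown; a consumer that loses the race sees NULL.
 */
#if 0
static void example_ref_ptr_lifecycle(void *obj)
{
    struct nvkms_ref_ptr *ref = nvkms_alloc_ref_ptr(obj);

    nvkms_inc_ref(ref);               /* reference handed to a consumer */

    /* ... later, in the consumer, with nvkms_lock held: */
    void *p = nvkms_dec_ref(ref);     /* NULL once the owner has called */
    if (p != NULL) {                  /* nvkms_free_ref_ptr()           */
        /* object still alive */
    }

    nvkms_free_ref_ptr(ref);          /* owner: sever ptr, drop own ref */
}
#endif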

/*************************************************************************
 * Timer support
 *
 * Core NVKMS needs to be able to schedule work to execute in the
 * future, within process context.
 *
 * To achieve this, use struct timer_list to schedule a timer
 * callback, nvkms_timer_callback().  This will execute in softirq
 * context, so from there schedule an nv_kthread_q item,
 * nvkms_kthread_q_callback(), which will execute in process context.
 *************************************************************************/

struct nvkms_timer_t {
    nv_kthread_q_item_t nv_kthread_q_item;
    struct timer_list kernel_timer;
    NvBool cancel;
    NvBool complete;
    NvBool isRefPtr;
    NvBool kernel_timer_created;
    nvkms_timer_proc_t *proc;
    void *dataPtr;
    NvU32 dataU32;
    struct list_head timers_list;
};

/*
 * Global list of pending timers; any change requires holding
 * nvkms_timers.lock.
 */
static struct {
    spinlock_t lock;
    struct list_head list;
} nvkms_timers;

static void nvkms_kthread_q_callback(void *arg)
{
    struct nvkms_timer_t *timer = arg;
    void *dataPtr;
    unsigned long flags = 0;

    /*
     * Remove this timer from the pending timers list: it is being
     * processed now.
     */
    spin_lock_irqsave(&nvkms_timers.lock, flags);
    list_del(&timer->timers_list);
    spin_unlock_irqrestore(&nvkms_timers.lock, flags);

    /*
     * Before this kthread_q callback proceeds, make sure that the timer
     * callback for this timer has also finished.  This is important
     * during module unload: it lets us unload safely by first deleting
     * pending timers and then waiting for the workqueue callbacks.
     */
    if (timer->kernel_timer_created) {
        del_timer_sync(&timer->kernel_timer);
    }

    /*
     * Block the kthread during system suspend & resume in order to defer
     * handling of events such as DP_IRQ and hotplugs until after resume.
     */
    nvkms_read_lock_pm_lock();

    down(&nvkms_lock);

    if (timer->isRefPtr) {
        // If the object this timer refers to was destroyed, treat the timer as
        // canceled.
        dataPtr = nvkms_dec_ref(timer->dataPtr);
        if (!dataPtr) {
            timer->cancel = NV_TRUE;
        }
    } else {
        dataPtr = timer->dataPtr;
    }

    if (!timer->cancel) {
        timer->proc(dataPtr, timer->dataU32);
        timer->complete = NV_TRUE;
    }

    if (timer->isRefPtr) {
        // ref_ptr-based timers are allocated with kmalloc(GFP_ATOMIC).
        kfree(timer);
    } else if (timer->cancel) {
        nvkms_free(timer, sizeof(*timer));
    }

    up(&nvkms_lock);

    nvkms_read_unlock_pm_lock();
}

static void nvkms_queue_work(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
    int ret = nv_kthread_q_schedule_q_item(q, q_item);
    /*
     * nv_kthread_q_schedule_q_item should only fail (which it indicates by
     * returning false) if the item is already scheduled or the queue is
     * stopped. Neither of those should happen in NVKMS.
     */
    WARN_ON(!ret);
}

static void _nvkms_timer_callback_internal(struct nvkms_timer_t *nvkms_timer)
{
    /* In softirq context, so schedule nvkms_kthread_q_callback(). */
    nvkms_queue_work(&nvkms_kthread_q, &nvkms_timer->nv_kthread_q_item);
}

/*
 * Why the "inline" keyword? Because only one of these next two functions will
 * be used, thus leading to a "defined but not used function" warning. The
 * "inline" keyword is redefined in the Kbuild system
 * (see: <kernel>/include/linux/compiler-gcc.h) so as to suppress that warning.
 */
inline static void nvkms_timer_callback_typed_data(struct timer_list *timer)
{
    struct nvkms_timer_t *nvkms_timer =
            container_of(timer, struct nvkms_timer_t, kernel_timer);

    _nvkms_timer_callback_internal(nvkms_timer);
}

inline static void nvkms_timer_callback_anon_data(unsigned long arg)
{
    struct nvkms_timer_t *nvkms_timer = (struct nvkms_timer_t *) arg;
    _nvkms_timer_callback_internal(nvkms_timer);
}

static void
nvkms_init_timer(struct nvkms_timer_t *timer, nvkms_timer_proc_t *proc,
                 void *dataPtr, NvU32 dataU32, NvBool isRefPtr, NvU64 usec)
{
    unsigned long flags = 0;

    memset(timer, 0, sizeof(*timer));
    timer->cancel = NV_FALSE;
    timer->complete = NV_FALSE;
    timer->isRefPtr = isRefPtr;

    timer->proc = proc;
    timer->dataPtr = dataPtr;
    timer->dataU32 = dataU32;

    nv_kthread_q_item_init(&timer->nv_kthread_q_item, nvkms_kthread_q_callback,
                           timer);

    /*
     * After adding the timer to timers_list, we must finish referencing it
     * (by calling nvkms_queue_work() or mod_timer()) before releasing the
     * lock.  Otherwise, if the code to free the timer were ever updated to
     * run in parallel with this, it could race against nvkms_init_timer()
     * and free the timer before its initialization is complete.
     */
    spin_lock_irqsave(&nvkms_timers.lock, flags);
    list_add(&timer->timers_list, &nvkms_timers.list);

    if (usec == 0) {
        timer->kernel_timer_created = NV_FALSE;
        nvkms_queue_work(&nvkms_kthread_q, &timer->nv_kthread_q_item);
    } else {
#if defined(NV_TIMER_SETUP_PRESENT)
        timer_setup(&timer->kernel_timer, nvkms_timer_callback_typed_data, 0);
#else
        init_timer(&timer->kernel_timer);
        timer->kernel_timer.function = nvkms_timer_callback_anon_data;
        timer->kernel_timer.data = (unsigned long) timer;
#endif

        timer->kernel_timer_created = NV_TRUE;
        mod_timer(&timer->kernel_timer, jiffies + NVKMS_USECS_TO_JIFFIES(usec));
    }
    spin_unlock_irqrestore(&nvkms_timers.lock, flags);
}

nvkms_timer_handle_t*
nvkms_alloc_timer(nvkms_timer_proc_t *proc,
                  void *dataPtr, NvU32 dataU32,
                  NvU64 usec)
{
    // nvkms_alloc_timer cannot be called from an interrupt context.
    struct nvkms_timer_t *timer = nvkms_alloc(sizeof(*timer), NV_FALSE);
    if (timer) {
        nvkms_init_timer(timer, proc, dataPtr, dataU32, NV_FALSE, usec);
    }
    return timer;
}

NvBool
nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc,
                               struct nvkms_ref_ptr *ref_ptr,
                               NvU32 dataU32, NvU64 usec)
{
    // nvkms_alloc_timer_with_ref_ptr is called from an interrupt bottom half
    // handler, which runs in a tasklet (i.e. atomic) context.
    struct nvkms_timer_t *timer = kmalloc(sizeof(*timer), GFP_ATOMIC);
    if (timer) {
        // Reference the ref_ptr to make sure that it doesn't get freed before
        // the timer fires.
        nvkms_inc_ref(ref_ptr);
        nvkms_init_timer(timer, proc, ref_ptr, dataU32, NV_TRUE, usec);
    }

    return timer != NULL;
}

void nvkms_free_timer(nvkms_timer_handle_t *handle)
{
    struct nvkms_timer_t *timer = handle;

    if (timer == NULL) {
        return;
    }

    if (timer->complete) {
        nvkms_free(timer, sizeof(*timer));
        return;
    }

    timer->cancel = NV_TRUE;
}
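
/*
 * Usage sketch (not compiled): schedule a callback to run in process
 * context after a delay, then cancel it if the caller's object goes away
 * first.  nvkms_free_timer() frees immediately only if the timer has
 * completed; otherwise it marks the timer canceled and
 * nvkms_kthread_q_callback() frees it.  exampleProc and examplePtr are
 * hypothetical.
 */
#if 0
static void example_timer_usage(void)
{
    nvkms_timer_handle_t *handle =
        nvkms_alloc_timer(exampleProc, examplePtr, 0,
                          1000000 /* fire in ~1 second */);
    /* ... */
    nvkms_free_timer(handle);  /* cancels the callback if still pending */
}
#endif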

void* nvkms_get_per_open_data(int fd)
{
    struct file *filp = fget(fd);
    struct nvkms_per_open *popen = NULL;
    dev_t rdev = 0;
    void *data = NULL;

    if (filp == NULL) {
        return NULL;
    }

    if (filp->f_inode == NULL) {
        goto done;
    }
    rdev = filp->f_inode->i_rdev;

    if ((MAJOR(rdev) != NVKMS_MAJOR_DEVICE_NUMBER) ||
        (MINOR(rdev) != NVKMS_MINOR_DEVICE_NUMBER)) {
        goto done;
    }

    popen = filp->private_data;
    if (popen == NULL) {
        goto done;
    }

    data = popen->data;

done:
    /*
     * fget() incremented the struct file's reference count, which
     * needs to be balanced with a call to fput().  It is safe to
     * decrement the reference count before returning
     * filp->private_data because core NVKMS is currently holding the
     * nvkms_lock, which prevents the nvkms_close() => nvKmsClose()
     * call chain from freeing the file out from under the caller of
     * nvkms_get_per_open_data().
     */
    fput(filp);

    return data;
}

NvBool nvkms_fd_is_nvidia_chardev(int fd)
{
    struct file *filp = fget(fd);
    dev_t rdev = 0;
    NvBool ret = NV_FALSE;

    if (filp == NULL) {
        return ret;
    }

    if (filp->f_inode == NULL) {
        goto done;
    }
    rdev = filp->f_inode->i_rdev;

    if (MAJOR(rdev) == NVKMS_MAJOR_DEVICE_NUMBER) {
        ret = NV_TRUE;
    }

done:
    fput(filp);

    return ret;
}

NvBool nvkms_open_gpu(NvU32 gpuId)
{
    nvidia_modeset_stack_ptr stack = NULL;
    NvBool ret;

    if (__rm_ops.alloc_stack(&stack) != 0) {
        return NV_FALSE;
    }

    ret = __rm_ops.open_gpu(gpuId, stack) == 0;

    __rm_ops.free_stack(stack);

    return ret;
}

void nvkms_close_gpu(NvU32 gpuId)
{
    nvidia_modeset_stack_ptr stack = NULL;

    if (__rm_ops.alloc_stack(&stack) != 0) {
        return;
    }

    __rm_ops.close_gpu(gpuId, stack);

    __rm_ops.free_stack(stack);
}

NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info)
{
    return __rm_ops.enumerate_gpus(gpu_info);
}

NvBool nvkms_allow_write_combining(void)
{
    return __rm_ops.system_info.allow_write_combining;
}

#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
/*************************************************************************
 * Implementation of sysfs interface to control backlight
 *************************************************************************/

struct nvkms_backlight_device {
    NvU32 gpu_id;
    NvU32 display_id;

    void *drv_priv;

    struct backlight_device *dev;
};

static int nvkms_update_backlight_status(struct backlight_device *bd)
{
    struct nvkms_backlight_device *nvkms_bd = bl_get_data(bd);
    NvBool status;
    int ret;

    ret = down_interruptible(&nvkms_lock);

    if (ret != 0) {
        return ret;
    }

    status = nvKmsSetBacklight(nvkms_bd->display_id, nvkms_bd->drv_priv,
                               bd->props.brightness);

    up(&nvkms_lock);

    return status ? 0 : -EINVAL;
}

static int nvkms_get_backlight_brightness(struct backlight_device *bd)
{
    struct nvkms_backlight_device *nvkms_bd = bl_get_data(bd);
    NvU32 brightness = 0;
    NvBool status;
    int ret;

    ret = down_interruptible(&nvkms_lock);

    if (ret != 0) {
        return ret;
    }

    status = nvKmsGetBacklight(nvkms_bd->display_id, nvkms_bd->drv_priv,
                               &brightness);

    up(&nvkms_lock);

    return status ? brightness : -1;
}

static const struct backlight_ops nvkms_backlight_ops = {
    .update_status = nvkms_update_backlight_status,
    .get_brightness = nvkms_get_backlight_brightness,
};
#endif /* IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) */

struct nvkms_backlight_device*
nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv,
                         NvU32 current_brightness)
{
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
    char name[18];
    struct backlight_properties props = {
        .brightness = current_brightness,
        .max_brightness = 100,
        .type = BACKLIGHT_RAW,
    };
    nv_gpu_info_t *gpu_info = NULL;
    NvU32 gpu_count = 0;
    struct nvkms_backlight_device *nvkms_bd = NULL;
    int i;

#if defined(NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE)
    if (!acpi_video_backlight_use_native()) {
        return NULL;
    }
#endif

    gpu_info = nvkms_alloc(NV_MAX_GPUS * sizeof(*gpu_info), NV_TRUE);
    if (gpu_info == NULL) {
        return NULL;
    }

    gpu_count = __rm_ops.enumerate_gpus(gpu_info);
    if (gpu_count == 0) {
        goto done;
    }

    for (i = 0; i < gpu_count; i++) {
        if (gpu_info[i].gpu_id == gpu_id) {
            break;
        }
    }

    if (i == gpu_count) {
        goto done;
    }

    nvkms_bd = nvkms_alloc(sizeof(*nvkms_bd), NV_TRUE);
    if (nvkms_bd == NULL) {
        goto done;
    }

    snprintf(name, sizeof(name), "nvidia_%d", i);
    name[sizeof(name) - 1] = '\0';

    nvkms_bd->gpu_id = gpu_id;
    nvkms_bd->display_id = display_id;
    nvkms_bd->drv_priv = drv_priv;

    nvkms_bd->dev =
        backlight_device_register(name,
                                  gpu_info[i].os_device_ptr,
                                  nvkms_bd,
                                  &nvkms_backlight_ops,
                                  &props);

done:
    nvkms_free(gpu_info, NV_MAX_GPUS * sizeof(*gpu_info));

    return nvkms_bd;
#else
    return NULL;
#endif /* IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) */
}

void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd)
{
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
    if (nvkms_bd->dev) {
        backlight_device_unregister(nvkms_bd->dev);
    }

    nvkms_free(nvkms_bd, sizeof(*nvkms_bd));
#endif /* IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) */
}

/*************************************************************************
 * Common to both user-space and kapi NVKMS interfaces
 *************************************************************************/

static void nvkms_kapi_event_kthread_q_callback(void *arg)
{
    struct NvKmsKapiDevice *device = arg;

    nvKmsKapiHandleEventQueueChange(device);
}

struct nvkms_per_open *nvkms_open_common(enum NvKmsClientType type,
                                         struct NvKmsKapiDevice *device,
                                         int *status)
{
    struct nvkms_per_open *popen = NULL;

    popen = nvkms_alloc(sizeof(*popen), NV_TRUE);

    if (popen == NULL) {
        *status = -ENOMEM;
        goto failed;
    }

    popen->type = type;

    *status = down_interruptible(&nvkms_lock);

    if (*status != 0) {
        goto failed;
    }

    popen->data = nvKmsOpen(current->tgid, type, popen);

    up(&nvkms_lock);

    if (popen->data == NULL) {
        *status = -EPERM;
        goto failed;
    }

    switch (popen->type) {
        case NVKMS_CLIENT_USER_SPACE:
            init_waitqueue_head(&popen->u.user.events.wait_queue);
            break;
        case NVKMS_CLIENT_KERNEL_SPACE:
            nv_kthread_q_item_init(&popen->u.kernel.events.nv_kthread_q_item,
                                   nvkms_kapi_event_kthread_q_callback,
                                   device);
            break;
    }

    *status = 0;

    return popen;

failed:

    nvkms_free(popen, sizeof(*popen));

    return NULL;
}

void nvkms_close_pm_locked(struct nvkms_per_open *popen)
{
    /*
     * Don't use down_interruptible(): we need to free resources
     * during close, so we have no choice but to wait to take the
     * mutex.
     */

    down(&nvkms_lock);

    nvKmsClose(popen->data);

    popen->data = NULL;

    up(&nvkms_lock);

    if (popen->type == NVKMS_CLIENT_KERNEL_SPACE) {
        /*
         * Flush any outstanding nvkms_kapi_event_kthread_q_callback() work
         * items before freeing popen.
         *
         * Note that this must be done after the above nvKmsClose() call, to
         * guarantee that no more nvkms_kapi_event_kthread_q_callback() work
         * items get scheduled.
         *
         * Also, note that though popen->data is freed above, any subsequent
         * nvkms_kapi_event_kthread_q_callback()'s for this popen should be
         * safe: if any nvkms_kapi_event_kthread_q_callback()-initiated work
         * attempts to call back into NVKMS, the popen->data==NULL check in
         * nvkms_ioctl_common() should reject the request.
         */

        nv_kthread_q_flush(&nvkms_kthread_q);
    }

    nvkms_free(popen, sizeof(*popen));
}

static void nvkms_close_pm_unlocked(void *data)
{
    struct nvkms_per_open *popen = data;

    nvkms_read_lock_pm_lock();

    nvkms_close_pm_locked(popen);

    nvkms_read_unlock_pm_lock();
}

static void nvkms_close_popen(struct nvkms_per_open *popen)
{
    if (nvkms_read_trylock_pm_lock() == 0) {
        nvkms_close_pm_locked(popen);
        nvkms_read_unlock_pm_lock();
    } else {
        nv_kthread_q_item_init(&popen->deferred_close_q_item,
                               nvkms_close_pm_unlocked,
                               popen);
        nvkms_queue_work(&nvkms_deferred_close_kthread_q,
                         &popen->deferred_close_q_item);
    }
}

int nvkms_ioctl_common
(
    struct nvkms_per_open *popen,
    NvU32 cmd, NvU64 address, const size_t size
)
{
    int status;
    NvBool ret;

    status = down_interruptible(&nvkms_lock);
    if (status != 0) {
        return status;
    }

    if (popen->data != NULL) {
        ret = nvKmsIoctl(popen->data, cmd, address, size);
    } else {
        ret = NV_FALSE;
    }

    up(&nvkms_lock);

    return ret ? 0 : -EPERM;
}

/*************************************************************************
 * NVKMS interface for kernel space NVKMS clients like KAPI
 *************************************************************************/

struct nvkms_per_open* nvkms_open_from_kapi
(
    struct NvKmsKapiDevice *device
)
{
    int status = 0;
    struct nvkms_per_open *ret;

    nvkms_read_lock_pm_lock();
    ret = nvkms_open_common(NVKMS_CLIENT_KERNEL_SPACE, device, &status);
    nvkms_read_unlock_pm_lock();

    return ret;
}

void nvkms_close_from_kapi(struct nvkms_per_open *popen)
{
    nvkms_close_pm_unlocked(popen);
}

NvBool nvkms_ioctl_from_kapi
(
    struct nvkms_per_open *popen,
    NvU32 cmd, void *params_address, const size_t param_size
)
{
    NvBool ret;

    nvkms_read_lock_pm_lock();
    ret = nvkms_ioctl_common(popen,
                             cmd,
                             (NvU64)(NvUPtr)params_address, param_size) == 0;
    nvkms_read_unlock_pm_lock();

    return ret;
}

/*************************************************************************
 * APIs for locking.
 *************************************************************************/

struct nvkms_sema_t {
    struct semaphore os_sema;
};

nvkms_sema_handle_t* nvkms_sema_alloc(void)
{
    nvkms_sema_handle_t *sema = nvkms_alloc(sizeof(*sema), NV_TRUE);

    if (sema != NULL) {
        sema_init(&sema->os_sema, 1);
    }

    return sema;
}

void nvkms_sema_free(nvkms_sema_handle_t *sema)
{
    nvkms_free(sema, sizeof(*sema));
}

void nvkms_sema_down(nvkms_sema_handle_t *sema)
{
    down(&sema->os_sema);
}

void nvkms_sema_up(nvkms_sema_handle_t *sema)
{
    up(&sema->os_sema);
}
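
/*
 * A minimal usage sketch (not compiled) of the semaphore wrappers;
 * nvkms_sema_alloc() initializes the count to 1, so the semaphore
 * behaves as a binary lock:
 */
#if 0
static void example_sema_usage(void)
{
    nvkms_sema_handle_t *sema = nvkms_sema_alloc();
    if (sema != NULL) {
        nvkms_sema_down(sema);
        /* ... critical section ... */
        nvkms_sema_up(sema);
        nvkms_sema_free(sema);
    }
}
#endif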

/*************************************************************************
 * Procfs files support code.
 *************************************************************************/

#if defined(CONFIG_PROC_FS)

#define NV_DEFINE_SINGLE_NVKMS_PROCFS_FILE(name) \
    NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nvkms_pm_lock)

#define NVKMS_PROCFS_FOLDER "driver/nvidia-modeset"

struct proc_dir_entry *nvkms_proc_dir;

static void nv_procfs_out_string(void *data, const char *str)
{
    struct seq_file *s = data;

    seq_puts(s, str);
}

static int nv_procfs_read_nvkms_proc(struct seq_file *s, void *arg)
{
    char *buffer;
    nvkms_procfs_proc_t *func;

#define NVKMS_PROCFS_STRING_SIZE 8192

    func = s->private;
    if (func == NULL) {
        return 0;
    }

    buffer = nvkms_alloc(NVKMS_PROCFS_STRING_SIZE, NV_TRUE);

    if (buffer != NULL) {
        int status = down_interruptible(&nvkms_lock);

        if (status != 0) {
            nvkms_free(buffer, NVKMS_PROCFS_STRING_SIZE);
            return status;
        }

        func(s, buffer, NVKMS_PROCFS_STRING_SIZE, &nv_procfs_out_string);

        up(&nvkms_lock);

        nvkms_free(buffer, NVKMS_PROCFS_STRING_SIZE);
    }

    return 0;
}

NV_DEFINE_SINGLE_NVKMS_PROCFS_FILE(nvkms_proc);

static NvBool
nvkms_add_proc_file(const nvkms_procfs_file_t *file)
{
    struct proc_dir_entry *new_proc_dir;

    if (nvkms_proc_dir == NULL) {
        return NV_FALSE;
    }

    new_proc_dir = proc_create_data(file->name, 0, nvkms_proc_dir,
                                    &nv_procfs_nvkms_proc_fops, file->func);
    return (new_proc_dir != NULL);
}

#endif /* defined(CONFIG_PROC_FS) */

static void nvkms_proc_init(void)
{
#if defined(CONFIG_PROC_FS)
    const nvkms_procfs_file_t *file;

    nvkms_proc_dir = NULL;
    nvKmsGetProcFiles(&file);

    if (file == NULL || file->name == NULL) {
        return;
    }

    nvkms_proc_dir = NV_CREATE_PROC_DIR(NVKMS_PROCFS_FOLDER, NULL);
    if (nvkms_proc_dir == NULL) {
        return;
    }

    while (file->name != NULL) {
        if (!nvkms_add_proc_file(file)) {
            nvkms_log(NVKMS_LOG_LEVEL_WARN, NVKMS_LOG_PREFIX,
                      "Failed to create proc file");
            break;
        }
        file++;
    }
#endif
}

static void nvkms_proc_exit(void)
{
#if defined(CONFIG_PROC_FS)
    if (nvkms_proc_dir == NULL) {
        return;
    }

    proc_remove(nvkms_proc_dir);
#endif /* CONFIG_PROC_FS */
}

/*************************************************************************
 * NVKMS Config File Read
 ************************************************************************/
static NvBool nvkms_fs_mounted(void)
{
    return current->fs != NULL;
}

static size_t nvkms_config_file_open
(
    char *fname,
    char ** const buff
)
{
    int i = 0;
    struct file *file;
    struct inode *file_inode;
    size_t file_size = 0;
    size_t read_size = 0;
#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
    loff_t pos = 0;
#endif

    if (!nvkms_fs_mounted()) {
        printk(KERN_ERR NVKMS_LOG_PREFIX "ERROR: Filesystems not mounted\n");
        return 0;
    }

    file = filp_open(fname, O_RDONLY, 0);
    if (file == NULL || IS_ERR(file)) {
        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Failed to open %s\n",
               fname);
        return 0;
    }

    file_inode = file->f_inode;
    if (file_inode == NULL || IS_ERR(file_inode)) {
        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Inode is invalid\n");
        goto done;
    }
    file_size = file_inode->i_size;
    if (file_size > NVKMS_READ_FILE_MAX_SIZE) {
        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: File exceeds maximum size\n");
        goto done;
    }

    *buff = nvkms_alloc(file_size, NV_FALSE);
    if (*buff == NULL) {
        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Out of memory\n");
        goto done;
    }

    /*
     * TODO: Once we have access to GPL symbols, this can be replaced with
     * kernel_read_file for kernels >= 4.6
     */
    while ((read_size < file_size) && (i++ < NVKMS_READ_FILE_MAX_LOOPS)) {
#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
        ssize_t ret = kernel_read(file, *buff + read_size,
                                  file_size - read_size, &pos);
#else
        ssize_t ret = kernel_read(file, read_size,
                                  *buff + read_size,
                                  file_size - read_size);
#endif
        if (ret <= 0) {
            break;
        }
        read_size += ret;
    }

    if (read_size != file_size) {
        printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Failed to read %s\n",
               fname);
        goto done;
    }

    filp_close(file, current->files);
    return file_size;

done:
    nvkms_free(*buff, file_size);
    filp_close(file, current->files);
    return 0;
}

/* must be called with nvkms_lock locked */
static void nvkms_read_config_file_locked(void)
{
    char *buffer = NULL;
    size_t buf_size = 0;

    /* only read the config file if the kernel parameter is set */
    if (!NVKMS_CONF_FILE_SPECIFIED) {
        return;
    }

    buf_size = nvkms_config_file_open(nvkms_conf, &buffer);

    if (buf_size == 0) {
        return;
    }

    if (nvKmsReadConf(buffer, buf_size, nvkms_config_file_open)) {
        printk(KERN_INFO NVKMS_LOG_PREFIX "Successfully read %s\n",
               nvkms_conf);
    }

    nvkms_free(buffer, buf_size);
}

/*************************************************************************
 * NVKMS KAPI functions
 ************************************************************************/

NvBool nvKmsKapiGetFunctionsTable
(
    struct NvKmsKapiFunctionsTable *funcsTable
)
{
    return nvKmsKapiGetFunctionsTableInternal(funcsTable);
}
EXPORT_SYMBOL(nvKmsKapiGetFunctionsTable);

/*************************************************************************
 * File operation callback functions.
 *************************************************************************/

static int nvkms_open(struct inode *inode, struct file *filp)
{
    int status;

    status = nv_down_read_interruptible(&nvkms_pm_lock);
    if (status != 0) {
        return status;
    }

    filp->private_data =
        nvkms_open_common(NVKMS_CLIENT_USER_SPACE, NULL, &status);

    nvkms_read_unlock_pm_lock();

    return status;
}

static int nvkms_close(struct inode *inode, struct file *filp)
{
    struct nvkms_per_open *popen = filp->private_data;

    if (popen == NULL) {
        return -EINVAL;
    }

    nvkms_close_popen(popen);
    return 0;
}

static int nvkms_mmap(struct file *filp, struct vm_area_struct *vma)
{
    return -EPERM;
}

static int nvkms_ioctl(struct inode *inode, struct file *filp,
                           unsigned int cmd, unsigned long arg)
{
    size_t size;
    unsigned int nr;
    int status;
    struct NvKmsIoctlParams params;
    struct nvkms_per_open *popen = filp->private_data;

    if ((popen == NULL) || (popen->data == NULL)) {
        return -EINVAL;
    }

    size = _IOC_SIZE(cmd);
    nr = _IOC_NR(cmd);

    /* The only supported ioctl is NVKMS_IOCTL_CMD. */

    if ((nr != NVKMS_IOCTL_CMD) || (size != sizeof(struct NvKmsIoctlParams))) {
        return -ENOTTY;
    }

    status = copy_from_user(&params, (void *) arg, size);
    if (status != 0) {
        return -EFAULT;
    }

    status = nv_down_read_interruptible(&nvkms_pm_lock);
    if (status != 0) {
        return status;
    }

    status = nvkms_ioctl_common(popen,
                                params.cmd,
                                params.address,
                                params.size);

    nvkms_read_unlock_pm_lock();

    return status;
}

static unsigned int nvkms_poll(struct file *filp, poll_table *wait)
{
    unsigned int mask = 0;
    struct nvkms_per_open *popen = filp->private_data;

    if ((popen == NULL) || (popen->data == NULL)) {
        return mask;
    }

    BUG_ON(popen->type != NVKMS_CLIENT_USER_SPACE);

    if ((filp->f_flags & O_NONBLOCK) == 0) {
        poll_wait(filp, &popen->u.user.events.wait_queue, wait);
    }

    if (atomic_read(&popen->u.user.events.available)) {
        mask = POLLPRI | POLLIN;
    }

    return mask;
}


/*************************************************************************
 * Module loading support code.
 *************************************************************************/

static nvidia_module_t nvidia_modeset_module = {
    .owner       = THIS_MODULE,
    .module_name = "nvidia-modeset",
    .instance    = 1, /* minor number: 255-1=254 */
    .open        = nvkms_open,
    .close       = nvkms_close,
    .mmap        = nvkms_mmap,
    .ioctl       = nvkms_ioctl,
    .poll        = nvkms_poll,
};

static int __init nvkms_init(void)
{
    int ret;

    atomic_set(&nvkms_alloc_called_count, 0);

    ret = nvkms_alloc_rm();

    if (ret != 0) {
        return ret;
    }

    sema_init(&nvkms_lock, 1);
    init_rwsem(&nvkms_pm_lock);

    ret = nv_kthread_q_init(&nvkms_kthread_q,
                            "nvidia-modeset/kthread_q");
    if (ret != 0) {
        goto fail_kthread;
    }

    ret = nv_kthread_q_init(&nvkms_deferred_close_kthread_q,
                            "nvidia-modeset/deferred_close_kthread_q");
    if (ret != 0) {
        goto fail_deferred_close_kthread;
    }

    INIT_LIST_HEAD(&nvkms_timers.list);
    spin_lock_init(&nvkms_timers.lock);

    ret = nvidia_register_module(&nvidia_modeset_module);

    if (ret != 0) {
        goto fail_register_module;
    }

    down(&nvkms_lock);
    if (!nvKmsModuleLoad()) {
        ret = -ENOMEM;
    }
    if (ret != 0) {
        up(&nvkms_lock);
        goto fail_module_load;
    }
    nvkms_read_config_file_locked();
    up(&nvkms_lock);

    nvkms_proc_init();

    return 0;

fail_module_load:
    nvidia_unregister_module(&nvidia_modeset_module);
fail_register_module:
    nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
fail_deferred_close_kthread:
    nv_kthread_q_stop(&nvkms_kthread_q);
fail_kthread:
    nvkms_free_rm();

    return ret;
}

static void __exit nvkms_exit(void)
{
    struct nvkms_timer_t *timer, *tmp_timer;
    unsigned long flags = 0;

    nvkms_proc_exit();

    down(&nvkms_lock);
    nvKmsModuleUnload();
    up(&nvkms_lock);

    /*
     * At this point, any pending tasks should be marked canceled, but
     * we still need to drain them, so that nvkms_kthread_q_callback() doesn't
     * get called after the module is unloaded.
     */
restart:
    spin_lock_irqsave(&nvkms_timers.lock, flags);

    list_for_each_entry_safe(timer, tmp_timer, &nvkms_timers.list, timers_list) {
        if (timer->kernel_timer_created) {
            /*
             * Delete each pending timer and check whether it was already
             * executing (del_timer_sync() returns 0) or was deactivated
             * before execution (returns 1).  If it had begun executing,
             * the kthread_q callback will wait for the timer to complete,
             * and we wait for the queue itself to drain with
             * nv_kthread_q_stop() below.
             */
            if (del_timer_sync(&timer->kernel_timer) == 1) {
                /* We deactivated the timer, so we must clean up after it. */
                list_del(&timer->timers_list);

                /*
                 * Drop the spinlock before freeing, because freeing
                 * memory may sleep.
                 */
                spin_unlock_irqrestore(&nvkms_timers.lock, flags);

                if (timer->isRefPtr) {
                    nvkms_dec_ref(timer->dataPtr);
                    kfree(timer);
                } else {
                    nvkms_free(timer, sizeof(*timer));
                }

                /* The list could have changed while we freed memory. */
                goto restart;
            }
        }
    }

    spin_unlock_irqrestore(&nvkms_timers.lock, flags);

    nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
    nv_kthread_q_stop(&nvkms_kthread_q);

    nvidia_unregister_module(&nvidia_modeset_module);
    nvkms_free_rm();

    if (malloc_verbose) {
        printk(KERN_INFO NVKMS_LOG_PREFIX "Total allocations: %d\n",
               atomic_read(&nvkms_alloc_called_count));
    }
}

module_init(nvkms_init);
module_exit(nvkms_exit);

MODULE_LICENSE("Dual MIT/GPL");

MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);