/*	$OpenBSD: drm_linux.c,v 1.112 2024/03/30 13:33:20 mpi Exp $	*/
/*
 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/event.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/fcntl.h>

#include <dev/pci/ppbreg.h>

#include <linux/dma-buf.h>
#include <linux/mod_devicetable.h>
#include <linux/acpi.h>
#include <linux/pagevec.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/scatterlist.h>
#include <linux/i2c.h>
#include <linux/pci.h>
#include <linux/notifier.h>
#include <linux/backlight.h>
#include <linux/shrinker.h>
#include <linux/fb.h>
#include <linux/xarray.h>
#include <linux/interval_tree.h>
#include <linux/kthread.h>
#include <linux/processor.h>
#include <linux/sync_file.h>

#include <drm/drm_device.h>
#include <drm/drm_connector.h>
#include <drm/drm_print.h>

#if defined(__amd64__) || defined(__i386__)
#include "bios.h"
#endif

/* allowed to sleep */
void
tasklet_unlock_wait(struct tasklet_struct *ts)
{
	while (test_bit(TASKLET_STATE_RUN, &ts->state))
		cpu_relax();
}

/* must not sleep */
void
tasklet_unlock_spin_wait(struct tasklet_struct *ts)
{
	while (test_bit(TASKLET_STATE_RUN, &ts->state))
		cpu_relax();
}

void
tasklet_run(void *arg)
{
	struct tasklet_struct *ts = arg;

	clear_bit(TASKLET_STATE_SCHED, &ts->state);
	if (tasklet_trylock(ts)) {
		if (!atomic_read(&ts->count)) {
			if (ts->use_callback)
				ts->callback(ts);
			else
				ts->func(ts->data);
		}
		tasklet_unlock(ts);
	}
}

/* 32 bit powerpc lacks 64 bit atomics */
#if defined(__powerpc__) && !defined(__powerpc64__)
struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
#endif

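/*
 * Linux scheduler state emulation: set_current_state() puts curproc on
 * the sleep queue via sleep_setup(), and __set_current_state(TASK_RUNNING)
 * takes it back off.  schedule_timeout() below then completes the sleep
 * with sleep_finish().
 */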
void
set_current_state(int state)
{
	int prio = state;

	KASSERT(state != TASK_RUNNING);
	/* check if already on the sleep list */
	if (curproc->p_wchan != NULL)
		return;
	sleep_setup(curproc, prio, "schto");
}

void
__set_current_state(int state)
{
	struct proc *p = curproc;
	int s;

	KASSERT(state == TASK_RUNNING);
	SCHED_LOCK(s);
	unsleep(p);
	p->p_stat = SONPROC;
	atomic_clearbits_int(&p->p_flag, P_WSLEEP);
	SCHED_UNLOCK(s);
}

void
schedule(void)
{
	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

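/*
 * A timeout of MAX_SCHEDULE_TIMEOUT means "sleep until woken"; any
 * other value is a tick count, and the ticks remaining at wakeup are
 * returned.  Callers are expected to have put curproc on the sleep
 * queue with set_current_state() first.
 */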
long
schedule_timeout(long timeout)
{
	unsigned long deadline;
	int timo = 0;

	KASSERT(!cold);

	if (timeout != MAX_SCHEDULE_TIMEOUT)
		timo = timeout;
	if (timeout != MAX_SCHEDULE_TIMEOUT)
		deadline = jiffies + timeout;
	sleep_finish(timo, timeout > 0);
	if (timeout != MAX_SCHEDULE_TIMEOUT)
		timeout = deadline - jiffies;

	return timeout > 0 ? timeout : 0;
}

long
schedule_timeout_uninterruptible(long timeout)
{
	tsleep(curproc, PWAIT, "schtou", timeout);
	return 0;
}

int
wake_up_process(struct proc *p)
{
	int s, rv;

	SCHED_LOCK(s);
	rv = wakeup_proc(p, 0);
	SCHED_UNLOCK(s);
	return rv;
}

int
autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
    int sync, void *key)
{
	if (wqe->private)
		wake_up_process(wqe->private);
	list_del_init(&wqe->entry);
	return 0;
}

void
prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
{
	mtx_enter(&wqh->lock);
	if (list_empty(&wqe->entry))
		__add_wait_queue(wqh, wqe);
	mtx_leave(&wqh->lock);

	set_current_state(state);
}

void
finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	__set_current_state(TASK_RUNNING);

	mtx_enter(&wqh->lock);
	if (!list_empty(&wqe->entry))
		list_del_init(&wqe->entry);
	mtx_leave(&wqh->lock);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	if (cold)
		return;

	if (wq)
		taskq_barrier((struct taskq *)wq);
}

bool
flush_work(struct work_struct *work)
{
	if (cold)
		return false;

	if (work->tq)
		taskq_barrier(work->tq);
	return false;
}

bool
flush_delayed_work(struct delayed_work *dwork)
{
	bool ret = false;

	if (cold)
		return false;

	while (timeout_pending(&dwork->to)) {
		tsleep(dwork, PWAIT, "fldwto", 1);
		ret = true;
	}

	if (dwork->tq)
		taskq_barrier(dwork->tq);
	return ret;
}

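/*
 * Linux kthreads are emulated with kernel threads tracked on a global
 * list; the park/stop handshakes below are implemented with flag bits,
 * wakeup() and tsleep_nsec().
 */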
struct kthread {
	int (*func)(void *);
	void *data;
	struct proc *proc;
	volatile u_int flags;
#define KTHREAD_SHOULDSTOP	0x0000001
#define KTHREAD_STOPPED		0x0000002
#define KTHREAD_SHOULDPARK	0x0000004
#define KTHREAD_PARKED		0x0000008
	LIST_ENTRY(kthread) next;
};

LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);

void
kthread_func(void *arg)
{
	struct kthread *thread = arg;
	int ret;

	ret = thread->func(thread->data);
	thread->flags |= KTHREAD_STOPPED;
	wakeup(thread);
	kthread_exit(ret);
}

struct proc *
kthread_run(int (*func)(void *), void *data, const char *name)
{
	struct kthread *thread;

	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
	thread->func = func;
	thread->data = data;
	thread->flags = 0;

	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
		free(thread, M_DRM, sizeof(*thread));
		return ERR_PTR(-ENOMEM);
	}

	LIST_INSERT_HEAD(&kthread_list, thread, next);
	return thread->proc;
}

struct kthread_worker *
kthread_create_worker(unsigned int flags, const char *fmt, ...)
{
	char name[MAXCOMLEN+1];
	va_list ap;

	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
	va_start(ap, fmt);
	vsnprintf(name, sizeof(name), fmt, ap);
	va_end(ap);
	w->tq = taskq_create(name, 1, IPL_HIGH, 0);

	return w;
}

void
kthread_destroy_worker(struct kthread_worker *worker)
{
	taskq_destroy(worker->tq);
	free(worker, M_DRM, sizeof(*worker));
}

void
kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
{
	work->tq = NULL;
	task_set(&work->task, (void (*)(void *))func, work);
}

bool
kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
{
	work->tq = worker->tq;
	return task_add(work->tq, &work->task);
}

bool
kthread_cancel_work_sync(struct kthread_work *work)
{
	return task_del(work->tq, &work->task);
}

void
kthread_flush_work(struct kthread_work *work)
{
	if (cold)
		return;

	if (work->tq)
		taskq_barrier(work->tq);
}

void
kthread_flush_worker(struct kthread_worker *worker)
{
	if (cold)
		return;

	if (worker->tq)
		taskq_barrier(worker->tq);
}

struct kthread *
kthread_lookup(struct proc *p)
{
	struct kthread *thread;

	LIST_FOREACH(thread, &kthread_list, next) {
		if (thread->proc == p)
			break;
	}
	KASSERT(thread);

	return thread;
}

int
kthread_should_park(void)
{
	struct kthread *thread = kthread_lookup(curproc);
	return (thread->flags & KTHREAD_SHOULDPARK);
}

void
kthread_parkme(void)
{
	struct kthread *thread = kthread_lookup(curproc);

	while (thread->flags & KTHREAD_SHOULDPARK) {
		thread->flags |= KTHREAD_PARKED;
		wakeup(thread);
		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
		thread->flags &= ~KTHREAD_PARKED;
	}
}

void
kthread_park(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	while ((thread->flags & KTHREAD_PARKED) == 0) {
		thread->flags |= KTHREAD_SHOULDPARK;
		wake_up_process(thread->proc);
		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
	}
}

void
kthread_unpark(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	thread->flags &= ~KTHREAD_SHOULDPARK;
	wakeup(thread);
}

int
kthread_should_stop(void)
{
	struct kthread *thread = kthread_lookup(curproc);
	return (thread->flags & KTHREAD_SHOULDSTOP);
}

void
kthread_stop(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	while ((thread->flags & KTHREAD_STOPPED) == 0) {
		thread->flags |= KTHREAD_SHOULDSTOP;
		kthread_unpark(p);
		wake_up_process(thread->proc);
		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
	}
	LIST_REMOVE(thread, next);
	free(thread, M_DRM, sizeof(*thread));
}

#if NBIOS > 0
extern char smbios_board_vendor[];
extern char smbios_board_prod[];
extern char smbios_board_serial[];
#endif

bool
dmi_match(int slot, const char *str)
{
	switch (slot) {
	case DMI_SYS_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_PRODUCT_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
	case DMI_PRODUCT_VERSION:
		if (hw_ver != NULL &&
		    !strcmp(hw_ver, str))
			return true;
		break;
#if NBIOS > 0
	case DMI_BOARD_VENDOR:
		if (strcmp(smbios_board_vendor, str) == 0)
			return true;
		break;
	case DMI_BOARD_NAME:
		if (strcmp(smbios_board_prod, str) == 0)
			return true;
		break;
	case DMI_BOARD_SERIAL:
		if (strcmp(smbios_board_serial, str) == 0)
			return true;
		break;
#else
	case DMI_BOARD_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_BOARD_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
#endif
	case DMI_NONE:
	default:
		return false;
	}

	return false;
}

static bool
dmi_found(const struct dmi_system_id *dsi)
{
	int i, slot;

	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		if (slot == DMI_NONE)
			break;
		if (!dmi_match(slot, dsi->matches[i].substr))
			return false;
	}

	return true;
}

const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;

	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi))
			return dsi;
	}

	return NULL;
}

#if NBIOS > 0
extern char smbios_bios_date[];
extern char smbios_bios_version[];
#endif

const char *
dmi_get_system_info(int slot)
{
#if NBIOS > 0
	switch (slot) {
	case DMI_BIOS_DATE:
		return smbios_bios_date;
	case DMI_BIOS_VERSION:
		return smbios_bios_version;
	default:
		printf("%s slot %d not handled\n", __func__, slot);
	}
#endif
	return NULL;
}

int
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	int num = 0;

	for (dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			num++;
			if (dsi->callback && dsi->callback(dsi))
				break;
		}
	}
	return (num);
}

struct vm_page *
alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	struct uvm_constraint_range *constraint = &no_constraint;
	struct pglist mlist;

	if (gfp_mask & M_CANFAIL)
		flags |= UVM_PLA_FAILOK;
	if (gfp_mask & M_ZERO)
		flags |= UVM_PLA_ZERO;
	if (gfp_mask & __GFP_DMA32)
		constraint = &dma_constraint;

	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
		return NULL;
	return TAILQ_FIRST(&mlist);
}

void
__free_pages(struct vm_page *page, unsigned int order)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < (1 << order); i++)
		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
	uvm_pglistfree(&mlist);
}

void
__pagevec_release(struct pagevec *pvec)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < pvec->nr; i++)
		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
	uvm_pglistfree(&mlist);
	pagevec_reinit(pvec);
}

static struct kmem_va_mode kv_physwait = {
	.kv_map = &phys_map,
	.kv_wait = 1,
};

void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap_va(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
#endif
}

vaddr_t kmap_atomic_va;
int kmap_atomic_inuse;

void *
kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
{
	KASSERT(!kmap_atomic_inuse);

	kmap_atomic_inuse = 1;
	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
	    PROT_READ | PROT_WRITE);
	return (void *)kmap_atomic_va;
}

void
kunmap_atomic(void *addr)
{
	KASSERT(kmap_atomic_inuse);

	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
	kmap_atomic_inuse = 0;
}

void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
    pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
	    &kd_nowait);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void *
vmap_pfn(unsigned long *pfns, unsigned int npfn, pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = (vaddr_t)km_alloc(PAGE_SIZE * npfn, &kv_any, &kp_none,
	    &kd_nowait);
	if (va == 0)
		return NULL;
	for (i = 0; i < npfn; i++) {
		pa = round_page(pfns[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	km_free((void *)va, size, &kv_any, &kp_none);
}

bool
is_vmalloc_addr(const void *p)
{
	vaddr_t min, max, addr;

	min = vm_map_min(kernel_map);
	max = vm_map_max(kernel_map);
	addr = (vaddr_t)p;

	if (addr >= min && addr <= max)
		return true;
	else
		return false;
}

void
print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
    int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
{
	const uint8_t *cbuf = buf;
	int i;

	for (i = 0; i < len; i++) {
		if ((i % rowsize) == 0)
			printf("%s", prefix_str);
		printf("%02x", cbuf[i]);
		if ((i % rowsize) == (rowsize - 1))
			printf("\n");
		else
			printf(" ");
	}
}

void *
memchr_inv(const void *s, int c, size_t n)
{
	if (n != 0) {
		const unsigned char *p = s;

		do {
			if (*p++ != (unsigned char)c)
				return ((void *)(p - 1));
		} while (--n != 0);
	}
	return (NULL);
}

int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

#undef RB_ROOT
#define RB_ROOT(head)	(head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

/*
 * This is a fairly minimal implementation of the Linux "idr" API.  It
 * probably isn't very efficient, and definitely isn't RCU safe.  The
 * pre-load buffer is global instead of per-cpu; we rely on the kernel
 * lock to make this work.  We do randomize our IDs in order to make
 * them harder to guess.
 */
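
/*
 * A minimal usage sketch of this idr emulation (a hypothetical caller,
 * not part of this file).  idr_alloc() with end <= 0 means "no upper
 * bound"; it returns the new id or a negative errno:
 *
 *	struct idr idr;
 *	int id;
 *
 *	idr_init(&idr);
 *	id = idr_alloc(&idr, ptr, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		...handle -ENOMEM or -ENOSPC...
 *	KASSERT(idr_find(&idr, id) == ptr);
 *	idr_remove(&idr, id);
 *	idr_destroy(&idr);
 */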

int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;
struct idr_entry *idr_entry_cache;

void
idr_init(struct idr *idr)
{
	SPLAY_INIT(&idr->tree);
}

void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

#ifdef notyet
	id->id = begin = start + arc4random_uniform(end - start);
#else
	id->id = begin = start;
#endif
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (id->id == end)
			id->id = start;
		else
			id->id++;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}

void *
idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct idr_entry find, *res;
	void *old;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return ERR_PTR(-ENOENT);
	old = res->ptr;
	res->ptr = ptr;
	return old;
}

void *
idr_remove(struct idr *idr, unsigned long id)
{
	struct idr_entry find, *res;
	void *ptr = NULL;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res) {
		SPLAY_REMOVE(idr_tree, &idr->tree, res);
		ptr = res->ptr;
		pool_put(&idr_pool, res);
	}
	return ptr;
}

void *
idr_find(struct idr *idr, unsigned long id)
{
	struct idr_entry find, *res;

	find.id = id;
	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
idr_get_next(struct idr *idr, int *id)
{
	struct idr_entry *res;

	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
		if (res->id >= *id) {
			*id = res->id;
			return res->ptr;
		}
	}

	return NULL;
}

int
idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
{
	struct idr_entry *id;
	int ret;

	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
		ret = func(id->id, id->ptr, data);
		if (ret)
			return ret;
	}

	return 0;
}

int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t gfp_mask)
{
	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
}

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

int
ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
	return idr_alloc(&ida->idr, NULL, min, INT_MAX, gfp);
}

int
ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
	return idr_alloc(&ida->idr, NULL, 0, max - 1, gfp);
}

void
ida_free(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

int
xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
struct pool xa_pool;
SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);

void
xa_init_flags(struct xarray *xa, gfp_t flags)
{
	static int initialized;

	if (!initialized) {
		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0,
		    "xapl", NULL);
		initialized = 1;
	}
	SPLAY_INIT(&xa->xa_tree);
	if (flags & XA_FLAGS_LOCK_IRQ)
		mtx_init(&xa->xa_lock, IPL_TTY);
	else
		mtx_init(&xa->xa_lock, IPL_NONE);
}

void
xa_destroy(struct xarray *xa)
{
	struct xarray_entry *id;

	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
		pool_put(&xa_pool, id);
	}
}

/* Don't wrap ids. */
int
__xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
{
	struct xarray_entry *xid;
	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
	int begin;

	if (gfp & GFP_NOWAIT) {
		xid = pool_get(&xa_pool, PR_NOWAIT);
	} else {
		mtx_leave(&xa->xa_lock);
		xid = pool_get(&xa_pool, PR_WAITOK);
		mtx_enter(&xa->xa_lock);
	}

	if (xid == NULL)
		return -ENOMEM;

	if (limit <= 0)
		limit = INT_MAX;

	xid->id = begin = start;

	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
		if (xid->id == limit)
			xid->id = start;
		else
			xid->id++;
		if (xid->id == begin) {
			pool_put(&xa_pool, xid);
			return -EBUSY;
		}
	}
	xid->ptr = entry;
	*id = xid->id;
	return 0;
}

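/*
 * A minimal usage sketch of this xarray emulation (a hypothetical
 * caller, not part of this file).  The __xa_* variants assume xa_lock
 * is already held, shown here by taking the mutex directly:
 *
 *	struct xarray xa;
 *	u32 id;
 *
 *	xa_init_flags(&xa, XA_FLAGS_ALLOC1);
 *	mtx_enter(&xa.xa_lock);
 *	if (__xa_alloc(&xa, &id, ptr, 0, GFP_NOWAIT) == 0)
 *		KASSERT(__xa_load(&xa, id) == ptr);
 *	mtx_leave(&xa.xa_lock);
 *	xa_destroy(&xa);
 */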
/*
 * Wrap ids and store next id.
 * We walk the entire tree so don't special case wrapping.
 * The only caller of this (i915_drm_client.c) doesn't use next id.
 */
int
__xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, int limit, u32 *next,
    gfp_t gfp)
{
	int r = __xa_alloc(xa, id, entry, limit, gfp);
	*next = *id + 1;
	return r;
}

void *
__xa_erase(struct xarray *xa, unsigned long index)
{
	struct xarray_entry find, *res;
	void *ptr = NULL;

	find.id = index;
	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
	if (res) {
		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
		ptr = res->ptr;
		pool_put(&xa_pool, res);
	}
	return ptr;
}

void *
__xa_load(struct xarray *xa, unsigned long index)
{
	struct xarray_entry find, *res;

	find.id = index;
	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
	if (res == NULL)
		return NULL;
	return res->ptr;
}

void *
__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	struct xarray_entry find, *res;
	void *prev;

	if (entry == NULL)
		return __xa_erase(xa, index);

	find.id = index;
	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
	if (res != NULL) {
		/* index exists */
		/* XXX Multislot entries updates not implemented yet */
		prev = res->ptr;
		res->ptr = entry;
		return prev;
	}

	/* index not found, add new */
	if (gfp & GFP_NOWAIT) {
		res = pool_get(&xa_pool, PR_NOWAIT);
	} else {
		mtx_leave(&xa->xa_lock);
		res = pool_get(&xa_pool, PR_WAITOK);
		mtx_enter(&xa->xa_lock);
	}
	if (res == NULL)
		return XA_ERROR(-ENOMEM);
	res->id = index;
	res->ptr = entry;
	if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL)
		return XA_ERROR(-EINVAL);
	return NULL;	/* no prev entry at index */
}

void *
xa_get_next(struct xarray *xa, unsigned long *index)
{
	struct xarray_entry *res;

	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
		if (res->id >= *index) {
			*index = res->id;
			return res->ptr;
		}
	}

	return NULL;
}

int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
	    M_DRM, gfp_mask | M_ZERO);
	if (table->sgl == NULL)
		return -ENOMEM;
	table->nents = table->orig_nents = nents;
	sg_mark_end(&table->sgl[nents - 1]);
	return 0;
}

void
sg_free_table(struct sg_table *table)
{
	free(table->sgl, M_DRM,
	    table->orig_nents * sizeof(struct scatterlist));
	table->orig_nents = 0;
	table->sgl = NULL;
}

size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	panic("%s", __func__);
}

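/*
 * Translate a Linux i2c_msg array into iic_exec() calls.  All but the
 * last two messages are issued without a STOP; a message immediately
 * preceding the final one supplies the command/register bytes, and the
 * final message itself is issued with a STOP.
 */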
int
i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	void *cmd = NULL;
	int cmdlen = 0;
	int err, ret = 0;
	int op;

	iic_acquire_bus(&adap->ic, 0);

	while (num > 2) {
		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
		    msgs->buf, msgs->len, 0);
		if (err) {
			ret = -err;
			goto fail;
		}
		msgs++;
		num--;
		ret++;
	}

	if (num > 1) {
		cmd = msgs->buf;
		cmdlen = msgs->len;
		msgs++;
		num--;
		ret++;
	}

	op = (msgs->flags & I2C_M_RD) ?
	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
	    msgs->buf, msgs->len, 0);
	if (err) {
		ret = -err;
		goto fail;
	}
	msgs++;
	ret++;

fail:
	iic_release_bus(&adap->ic, 0);

	return ret;
}

int
__i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	int ret, retries;

	retries = adap->retries;
retry:
	if (adap->algo)
		ret = adap->algo->master_xfer(adap, msgs, num);
	else
		ret = i2c_master_xfer(adap, msgs, num);
	if (ret == -EAGAIN && retries > 0) {
		retries--;
		goto retry;
	}

	return ret;
}

int
i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	int ret;

	if (adap->lock_ops)
		adap->lock_ops->lock_bus(adap, 0);

	ret = __i2c_transfer(adap, msgs, num);

	if (adap->lock_ops)
		adap->lock_ops->unlock_bus(adap, 0);

	return ret;
}

int
i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct i2c_algo_bit_data *algo = adap->algo_data;
	struct i2c_adapter bb;

	memset(&bb, 0, sizeof(bb));
	bb.ic = algo->ic;
	bb.retries = adap->retries;
	return i2c_master_xfer(&bb, msgs, num);
}

uint32_t
i2c_bb_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

struct i2c_algorithm i2c_bit_algo = {
	.master_xfer = i2c_bb_master_xfer,
	.functionality = i2c_bb_functionality
};

int
i2c_bit_add_bus(struct i2c_adapter *adap)
{
	adap->algo = &i2c_bit_algo;
	adap->retries = 3;

	return 0;
}

#if defined(__amd64__) || defined(__i386__)

/*
 * This is a minimal implementation of the Linux vga_get/vga_put
 * interface.  In all likelihood, it will only work for inteldrm(4) as
 * it assumes that if there is another active VGA device in the
 * system, it is sitting behind a PCI bridge.
 */
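
/*
 * vga_get_uninterruptible() walks the PCI bus and clears the VGA
 * enable bit on the first bridge found forwarding VGA cycles;
 * vga_put() restores that bit on the remembered bridge.
 */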

extern int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);

pcitag_t vga_bridge_tag;
int vga_bridge_disabled;

int
vga_disable_bridge(struct pci_attach_args *pa)
{
	pcireg_t bhlc, bc;

	if (pa->pa_domain != 0)
		return 0;

	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return 0;

	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
	if ((bc & PPB_BC_VGA_ENABLE) == 0)
		return 0;
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_tag = pa->pa_tag;
	vga_bridge_disabled = 1;

	return 1;
}

void
vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
{
	if (pdev->pci->sc_bridgetag != NULL)
		return;
	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
}

void
vga_put(struct pci_dev *pdev, int rsrc)
{
	pcireg_t bc;

	if (!vga_bridge_disabled)
		return;

	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
	bc |= PPB_BC_VGA_ENABLE;
	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_disabled = 0;
}

#endif

/*
 * ACPI types and interfaces.
 */

#ifdef __HAVE_ACPI
#include "acpi.h"
#endif

#if NACPI > 0

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/amltypes.h>
#include <dev/acpi/dsdt.h>

acpi_status
acpi_get_table(const char *sig, int instance,
    struct acpi_table_header **hdr)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	if (sc == NULL)
		return AE_NOT_FOUND;

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}

void
acpi_put_table(struct acpi_table_header *hdr)
{
}

acpi_status
acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
{
	node = aml_searchname(node, name);
	if (node == NULL)
		return AE_NOT_FOUND;

	*rnode = node;
	return 0;
}

acpi_status
acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
{
	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
	KASSERT(type == ACPI_FULL_PATHNAME);
	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
	return 0;
}

acpi_status
acpi_evaluate_object(acpi_handle node, const char *name,
    struct acpi_object_list *params, struct acpi_buffer *result)
{
	struct aml_value args[4], res;
	union acpi_object *obj;
	uint8_t *data;
	int i;

	KASSERT(params->count <= nitems(args));

	for (i = 0; i < params->count; i++) {
		args[i].type = params->pointer[i].type;
		switch (args[i].type) {
		case AML_OBJTYPE_INTEGER:
			args[i].v_integer = params->pointer[i].integer.value;
			break;
		case AML_OBJTYPE_BUFFER:
			args[i].length = params->pointer[i].buffer.length;
			args[i].v_buffer = params->pointer[i].buffer.pointer;
			break;
		default:
			printf("%s: arg type 0x%02x", __func__, args[i].type);
			return AE_BAD_PARAMETER;
		}
	}

	if (name) {
		node = aml_searchname(node, name);
		if (node == NULL)
			return AE_NOT_FOUND;
	}
	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
		aml_freevalue(&res);
		return AE_ERROR;
	}

	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);

	result->length = sizeof(union acpi_object);
	switch (res.type) {
	case AML_OBJTYPE_BUFFER:
		result->length += res.length;
		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
		obj = (union acpi_object *)result->pointer;
		data = (uint8_t *)(obj + 1);
		obj->type = res.type;
		obj->buffer.length = res.length;
		obj->buffer.pointer = data;
		memcpy(data, res.v_buffer, res.length);
		break;
	default:
		printf("%s: return type 0x%02x", __func__, res.type);
		aml_freevalue(&res);
		return AE_ERROR;
	}

	aml_freevalue(&res);
	return 0;
}

SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
    SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);

int
drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
{
	struct acpi_bus_event event;
	struct notifier_block *nb;

	event.device_class = ACPI_VIDEO_CLASS;
	event.type = notify;

	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
		nb->notifier_call(nb, 0, &event);
	return 0;
}

int
register_acpi_notifier(struct notifier_block *nb)
{
	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
	return 0;
}

int
unregister_acpi_notifier(struct notifier_block *nb)
{
	struct notifier_block *tmp;

	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
		if (tmp == nb) {
			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
			    notifier_block, link);
			return 0;
		}
	}

	return -ENOENT;
}

const char *
acpi_format_exception(acpi_status status)
{
	switch (status) {
	case AE_NOT_FOUND:
		return "not found";
	case AE_BAD_PARAMETER:
		return "bad parameter";
	default:
		return "unknown";
	}
}

#endif

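/*
 * Backlight devices are tracked on a global list; status updates are
 * deferred to the system taskq via backlight_schedule_update_status().
 */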
SLIST_HEAD(,backlight_device) backlight_device_list =
    SLIST_HEAD_INITIALIZER(backlight_device_list);

void
backlight_do_update_status(void *arg)
{
	backlight_update_status(arg);
}

struct backlight_device *
backlight_device_register(const char *name, void *kdev, void *data,
    const struct backlight_ops *ops, const struct backlight_properties *props)
{
	struct backlight_device *bd;

	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
	bd->ops = ops;
	bd->props = *props;
	bd->data = data;

	task_set(&bd->task, backlight_do_update_status, bd);

	SLIST_INSERT_HEAD(&backlight_device_list, bd, next);
	bd->name = name;

	return bd;
}

void
backlight_device_unregister(struct backlight_device *bd)
{
	SLIST_REMOVE(&backlight_device_list, bd, backlight_device, next);
	free(bd, M_DRM, sizeof(*bd));
}

void
backlight_schedule_update_status(struct backlight_device *bd)
{
	task_add(systq, &bd->task);
}

int
backlight_enable(struct backlight_device *bd)
{
	if (bd == NULL)
		return 0;

	bd->props.power = FB_BLANK_UNBLANK;

	return bd->ops->update_status(bd);
}

int
backlight_disable(struct backlight_device *bd)
{
	if (bd == NULL)
		return 0;

	bd->props.power = FB_BLANK_POWERDOWN;

	return bd->ops->update_status(bd);
}

struct backlight_device *
backlight_device_get_by_name(const char *name)
{
	struct backlight_device *bd;

	SLIST_FOREACH(bd, &backlight_device_list, next) {
		if (strcmp(name, bd->name) == 0)
			return bd;
	}

	return NULL;
}

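/*
 * Linux dev_set_drvdata()/dev_get_drvdata() are emulated with a global
 * list of (device, data) pairs rather than a per-device field.
 */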
struct drvdata {
	struct device *dev;
	void *data;
	SLIST_ENTRY(drvdata) next;
};

SLIST_HEAD(,drvdata) drvdata_list = SLIST_HEAD_INITIALIZER(drvdata_list);

void
dev_set_drvdata(struct device *dev, void *data)
{
	struct drvdata *drvdata;

	SLIST_FOREACH(drvdata, &drvdata_list, next) {
		if (drvdata->dev == dev) {
			drvdata->data = data;
			return;
		}
	}

	if (data == NULL)
		return;

	drvdata = malloc(sizeof(*drvdata), M_DRM, M_WAITOK);
	drvdata->dev = dev;
	drvdata->data = data;

	SLIST_INSERT_HEAD(&drvdata_list, drvdata, next);
}

void *
dev_get_drvdata(struct device *dev)
{
	struct drvdata *drvdata;

	SLIST_FOREACH(drvdata, &drvdata_list, next) {
		if (drvdata->dev == dev)
			return drvdata->data;
	}

	return NULL;
}

void
drm_sysfs_hotplug_event(struct drm_device *dev)
{
	knote_locked(&dev->note, NOTE_CHANGE);
}

void
drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
{
	knote_locked(&connector->dev->note, NOTE_CHANGE);
}

void
drm_sysfs_connector_status_event(struct drm_connector *connector,
    struct drm_property *property)
{
	STUB();
}

void
drm_sysfs_connector_property_event(struct drm_connector *connector,
    struct drm_property *property)
{
	STUB();
}

struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence **dfp)
{
	struct dma_fence *fence;

	if (dfp == NULL)
		return NULL;
	fence = *dfp;
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

void
dma_fence_release(struct kref *ref)
{
	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);

	if (fence->ops && fence->ops->release)
		fence->ops->release(fence);
	else
		free(fence, M_DRM, 0);
}

void
dma_fence_put(struct dma_fence *fence)
{
	if (fence)
		kref_put(&fence->refcount, dma_fence_release);
}

int
dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	if (fence == NULL)
		return -EINVAL;

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}

int
dma_fence_signal(struct dma_fence *fence)
{
	int r;

	if (fence == NULL)
		return -EINVAL;

	mtx_enter(fence->lock);
	r = dma_fence_signal_timestamp_locked(fence, ktime_get());
	mtx_leave(fence->lock);

	return r;
}

int
dma_fence_signal_locked(struct dma_fence *fence)
{
	if (fence == NULL)
		return -EINVAL;

	return dma_fence_signal_timestamp_locked(fence, ktime_get());
}

int
dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
{
	int r;

	if (fence == NULL)
		return -EINVAL;

	mtx_enter(fence->lock);
	r = dma_fence_signal_timestamp_locked(fence, timestamp);
	mtx_leave(fence->lock);

	return r;
}

bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal(fence);
		return true;
	}

	return false;
}

bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal_locked(fence);
		return true;
	}

	return false;
}

ktime_t
dma_fence_timestamp(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
			CPU_BUSY_CYCLE();
		return fence->timestamp;
	} else {
		return ktime_get();
	}
}

long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{
	if (timeout < 0)
		return -EINVAL;

	if (fence->ops->wait)
		return fence->ops->wait(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}

long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	long ret;

	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    fence->ops->enable_signaling) {
		mtx_enter(fence->lock);
		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);
		mtx_leave(fence->lock);
	}
}

void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    struct mutex *lock, uint64_t context, uint64_t seqno)
{
	fence->ops = ops;
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0;
	fence->error = 0;
	kref_init(&fence->refcount);
	INIT_LIST_HEAD(&fence->cb_list);
}

int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
    dma_fence_func_t func)
{
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	mtx_enter(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	mtx_leave(fence->lock);

	return ret;
}

bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	bool ret;

	mtx_enter(fence->lock);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	mtx_leave(fence->lock);

	return ret;
}

static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);

uint64_t
dma_fence_context_alloc(unsigned int num)
{
	return atomic64_add_return(num, &drm_fence_context_count) - num;
}

struct default_wait_cb {
	struct dma_fence_cb base;
	struct proc *proc;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
	    container_of(cb, struct default_wait_cb, base);

	wake_up_process(wait->proc);
}

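/*
 * Default wait: register a callback that wakes the sleeping proc, then
 * msleep() on the fence lock until the fence is signalled, the timeout
 * (in ticks) runs out, or (if intr) a signal arrives.
 */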
1947 long
dma_fence_default_wait(struct dma_fence * fence,bool intr,signed long timeout)1948 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1949 {
1950 long ret = timeout ? timeout : 1;
1951 unsigned long end;
1952 int err;
1953 struct default_wait_cb cb;
1954 bool was_set;
1955
1956 KASSERT(timeout <= INT_MAX);
1957
1958 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1959 return ret;
1960
1961 mtx_enter(fence->lock);
1962
1963 was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1964 &fence->flags);
1965
1966 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1967 goto out;
1968
1969 if (!was_set && fence->ops->enable_signaling) {
1970 if (!fence->ops->enable_signaling(fence)) {
1971 dma_fence_signal_locked(fence);
1972 goto out;
1973 }
1974 }
1975
1976 if (timeout == 0) {
1977 ret = 0;
1978 goto out;
1979 }
1980
1981 cb.base.func = dma_fence_default_wait_cb;
1982 cb.proc = curproc;
1983 list_add(&cb.base.node, &fence->cb_list);
1984
1985 end = jiffies + timeout;
1986 for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1987 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1988 break;
1989 err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1990 "dmafence", ret);
1991 if (err == EINTR || err == ERESTART) {
1992 ret = -ERESTARTSYS;
1993 break;
1994 }
1995 }
1996
1997 if (!list_empty(&cb.base.node))
1998 list_del(&cb.base.node);
1999 out:
2000 mtx_leave(fence->lock);
2001
2002 return ret;
2003 }
2004
2005 static bool
dma_fence_test_signaled_any(struct dma_fence ** fences,uint32_t count,uint32_t * idx)2006 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
2007 uint32_t *idx)
2008 {
2009 int i;
2010
2011 for (i = 0; i < count; ++i) {
2012 struct dma_fence *fence = fences[i];
2013 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
2014 if (idx)
2015 *idx = i;
2016 return true;
2017 }
2018 }
2019 return false;
2020 }
2021
2022 long
dma_fence_wait_any_timeout(struct dma_fence ** fences,uint32_t count,bool intr,long timeout,uint32_t * idx)2023 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
2024 bool intr, long timeout, uint32_t *idx)
2025 {
2026 struct default_wait_cb *cb;
2027 long ret = timeout;
2028 unsigned long end;
2029 int i, err;
2030
2031 KASSERT(timeout <= INT_MAX);
2032
2033 if (timeout == 0) {
2034 for (i = 0; i < count; i++) {
2035 if (dma_fence_is_signaled(fences[i])) {
2036 if (idx)
2037 *idx = i;
2038 return 1;
2039 }
2040 }
2041 return 0;
2042 }
2043
2044 cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2045 if (cb == NULL)
2046 return -ENOMEM;
2047
2048 for (i = 0; i < count; i++) {
2049 struct dma_fence *fence = fences[i];
2050 cb[i].proc = curproc;
2051 if (dma_fence_add_callback(fence, &cb[i].base,
2052 dma_fence_default_wait_cb)) {
2053 if (idx)
2054 *idx = i;
2055 goto cb_cleanup;
2056 }
2057 }
2058
2059 end = jiffies + timeout;
2060 for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
2061 if (dma_fence_test_signaled_any(fences, count, idx))
2062 break;
2063 err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
2064 if (err == EINTR || err == ERESTART) {
2065 ret = -ERESTARTSYS;
2066 break;
2067 }
2068 }
2069
2070 cb_cleanup:
2071 while (i-- > 0)
2072 dma_fence_remove_callback(fences[i], &cb[i].base);
2073 free(cb, M_DRM, count * sizeof(*cb));
2074 return ret;
2075 }
2076
2077 void
dma_fence_set_deadline(struct dma_fence * f,ktime_t t)2078 dma_fence_set_deadline(struct dma_fence *f, ktime_t t)
2079 {
2080 if (f->ops->set_deadline == NULL)
2081 return;
2082 if (dma_fence_is_signaled(f) == false)
2083 f->ops->set_deadline(f, t);
2084 }
2085
2086 static struct dma_fence dma_fence_stub;
2087 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
2088
2089 static const char *
dma_fence_stub_get_name(struct dma_fence * fence)2090 dma_fence_stub_get_name(struct dma_fence *fence)
2091 {
2092 return "stub";
2093 }
2094
static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};

struct dma_fence *
dma_fence_get_stub(void)
{
	mtx_enter(&dma_fence_stub_mtx);
	if (dma_fence_stub.ops == NULL) {
		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
		    &dma_fence_stub_mtx, 0, 0);
		dma_fence_signal_locked(&dma_fence_stub);
	}
	mtx_leave(&dma_fence_stub_mtx);

	return dma_fence_get(&dma_fence_stub);
}

struct dma_fence *
dma_fence_allocate_private_stub(ktime_t ts)
{
	struct dma_fence *f = malloc(sizeof(*f), M_DRM,
	    M_ZERO | M_WAITOK | M_CANFAIL);
	if (f == NULL)
		return NULL;
	dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
	dma_fence_signal_timestamp(f, ts);
	return f;
}

static const char *
dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *
dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void
irq_dma_fence_array_work(void *arg)
{
	struct dma_fence_array *dfa = (struct dma_fence_array *)arg;
	dma_fence_signal(&dfa->base);
	dma_fence_put(&dfa->base);
}

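/*
 * Runs when a member fence signals.  The final dma_fence_signal() of
 * the array is deferred to a timeout handler rather than issued from
 * the member fence's callback context.
 */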
static void
dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
	    container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *dfa = array_cb->array;

	if (atomic_dec_and_test(&dfa->num_pending))
		timeout_add(&dfa->to, 1);
	else
		dma_fence_put(&dfa->base);
}

static bool
dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
	int i;

	for (i = 0; i < dfa->num_fences; ++i) {
		cb[i].array = dfa;
		dma_fence_get(&dfa->base);
		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
		    dma_fence_array_cb_func)) {
			dma_fence_put(&dfa->base);
			if (atomic_dec_and_test(&dfa->num_pending))
				return false;
		}
	}

	return true;
}

static bool
dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);

	return atomic_read(&dfa->num_pending) <= 0;
}

static void
dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);
	int i;

	for (i = 0; i < dfa->num_fences; ++i)
		dma_fence_put(dfa->fences[i]);

	free(dfa->fences, M_DRM, 0);
	dma_fence_free(fence);
}

struct dma_fence_array *
dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
    unsigned seqno, bool signal_on_any)
{
	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
	    (num_fences * sizeof(struct dma_fence_array_cb)),
	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
	if (dfa == NULL)
		return NULL;

	mtx_init(&dfa->lock, IPL_TTY);
	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
	    context, seqno);
	timeout_set(&dfa->to, irq_dma_fence_array_work, dfa);

	dfa->num_fences = num_fences;
	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
	dfa->fences = fences;

	return dfa;
}

struct dma_fence *
dma_fence_array_first(struct dma_fence *f)
{
	struct dma_fence_array *dfa;

	if (f == NULL)
		return NULL;

	if ((dfa = to_dma_fence_array(f)) == NULL)
		return f;

	if (dfa->num_fences > 0)
		return dfa->fences[0];

	return NULL;
}

struct dma_fence *
dma_fence_array_next(struct dma_fence *f, unsigned int i)
{
	struct dma_fence_array *dfa;

	if (f == NULL)
		return NULL;

	if ((dfa = to_dma_fence_array(f)) == NULL)
		return NULL;

	if (i < dfa->num_fences)
		return dfa->fences[i];

	return NULL;
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};

int
dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
{
	struct dma_fence_chain *chain;
	struct dma_fence *fence;

	if (seqno == 0)
		return 0;

	if ((chain = to_dma_fence_chain(*df)) == NULL)
		return -EINVAL;

	fence = &chain->base;
	if (fence->seqno < seqno)
		return -EINVAL;

	dma_fence_chain_for_each(*df, fence) {
		if ((*df)->context != fence->context)
			break;

		chain = to_dma_fence_chain(*df);
		if (chain->prev_seqno < seqno)
			break;
	}
	dma_fence_put(fence);

	return 0;
}

void
dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
    struct dma_fence *fence, uint64_t seqno)
{
	uint64_t context;

	chain->fence = fence;
	chain->prev = prev;
	mtx_init(&chain->lock, IPL_TTY);

	/* if prev is a chain */
	if (to_dma_fence_chain(prev) != NULL) {
		if (__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
			chain->prev_seqno = prev->seqno;
			context = prev->context;
		} else {
			chain->prev_seqno = 0;
			context = dma_fence_context_alloc(1);
			seqno = prev->seqno;
		}
	} else {
		chain->prev_seqno = 0;
		context = dma_fence_context_alloc(1);
	}

	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
	    context, seqno);
}

static const char *
dma_fence_chain_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_chain";
}

static const char *
dma_fence_chain_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static bool dma_fence_chain_enable_signaling(struct dma_fence *);

static void
dma_fence_chain_timo(void *arg)
{
	struct dma_fence_chain *chain = (struct dma_fence_chain *)arg;

	if (dma_fence_chain_enable_signaling(&chain->base) == false)
		dma_fence_signal(&chain->base);
	dma_fence_put(&chain->base);
}

static void
dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_chain *chain =
	    container_of(cb, struct dma_fence_chain, cb);
	timeout_set(&chain->to, dma_fence_chain_timo, chain);
	timeout_add(&chain->to, 1);
	dma_fence_put(f);
}

static bool
dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_chain *chain, *h;
	struct dma_fence *f;

	h = to_dma_fence_chain(fence);
	dma_fence_get(&h->base);
	dma_fence_chain_for_each(fence, &h->base) {
		chain = to_dma_fence_chain(fence);
		if (chain == NULL)
			f = fence;
		else
			f = chain->fence;

		dma_fence_get(f);
		if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) {
			dma_fence_put(fence);
			return true;
		}
		dma_fence_put(f);
	}
	dma_fence_put(&h->base);
	return false;
}

static bool
dma_fence_chain_signaled(struct dma_fence *fence)
{
	struct dma_fence_chain *chain;
	struct dma_fence *f;

	dma_fence_chain_for_each(fence, fence) {
		chain = to_dma_fence_chain(fence);
		if (chain == NULL)
			f = fence;
		else
			f = chain->fence;

		if (dma_fence_is_signaled(f) == false) {
			dma_fence_put(fence);
			return false;
		}
	}
	return true;
}

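/*
 * Tear down a chain node.  Predecessors that nothing else references
 * are unlinked iteratively here instead of letting dma_fence_put()
 * recurse down a long chain.
 */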
static void
dma_fence_chain_release(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
	struct dma_fence_chain *prev_chain;
	struct dma_fence *prev;

	for (prev = chain->prev; prev != NULL; prev = chain->prev) {
		if (kref_read(&prev->refcount) > 1)
			break;
		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
			break;
		chain->prev = prev_chain->prev;
		prev_chain->prev = NULL;
		dma_fence_put(prev);
	}
	dma_fence_put(prev);
	dma_fence_put(chain->fence);
	dma_fence_free(fence);
}

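/*
 * Step past signaled predecessors, splicing them out of the chain
 * with an atomic compare-and-swap.  Consumes the caller's reference
 * on fence and returns a referenced predecessor or NULL.
 */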
struct dma_fence *
dma_fence_chain_walk(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain;
	struct dma_fence *prev, *new_prev, *tmp;

	if (chain == NULL) {
		dma_fence_put(fence);
		return NULL;
	}

	while ((prev = dma_fence_get(chain->prev)) != NULL) {
		prev_chain = to_dma_fence_chain(prev);
		if (prev_chain != NULL) {
			if (!dma_fence_is_signaled(prev_chain->fence))
				break;
			new_prev = dma_fence_get(prev_chain->prev);
		} else {
			if (!dma_fence_is_signaled(prev))
				break;
			new_prev = NULL;
		}
		tmp = atomic_cas_ptr(&chain->prev, prev, new_prev);
		dma_fence_put(tmp == prev ? prev : new_prev);
		dma_fence_put(prev);
	}

	dma_fence_put(fence);
	return prev;
}

const struct dma_fence_ops dma_fence_chain_ops = {
	.get_driver_name = dma_fence_chain_get_driver_name,
	.get_timeline_name = dma_fence_chain_get_timeline_name,
	.enable_signaling = dma_fence_chain_enable_signaling,
	.signaled = dma_fence_chain_signaled,
	.release = dma_fence_chain_release,
	.use_64bit_seqno = true,
};

bool
dma_fence_is_container(struct dma_fence *fence)
{
	return (fence->ops == &dma_fence_chain_ops) ||
	    (fence->ops == &dma_fence_array_ops);
}

int
dmabuf_read(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
dmabuf_write(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}

int
dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

int
dmabuf_kqfilter(struct file *fp, struct knote *kn)
{
	return (EINVAL);
}

int
dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	memset(st, 0, sizeof(*st));
	st->st_size = dmabuf->size;
	st->st_mode = S_IFIFO;	/* XXX */
	return (0);
}

int
dmabuf_close(struct file *fp, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	fp->f_data = NULL;
	KERNEL_LOCK();
	dmabuf->ops->release(dmabuf);
	KERNEL_UNLOCK();
	free(dmabuf, M_DRM, sizeof(struct dma_buf));
	return (0);
}

int
dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;
	off_t newoff;

	if (*offset != 0)
		return (EINVAL);

	switch (whence) {
	case SEEK_SET:
		newoff = 0;
		break;
	case SEEK_END:
		newoff = dmabuf->size;
		break;
	default:
		return (EINVAL);
	}
	mtx_enter(&fp->f_mtx);
	fp->f_offset = newoff;
	mtx_leave(&fp->f_mtx);
	*offset = newoff;
	return (0);
}

const struct fileops dmabufops = {
	.fo_read = dmabuf_read,
	.fo_write = dmabuf_write,
	.fo_ioctl = dmabuf_ioctl,
	.fo_kqfilter = dmabuf_kqfilter,
	.fo_stat = dmabuf_stat,
	.fo_close = dmabuf_close,
	.fo_seek = dmabuf_seek,
};

struct dma_buf *
dma_buf_export(const struct dma_buf_export_info *info)
{
	struct proc *p = curproc;
	struct dma_buf *dmabuf;
	struct file *fp;

	fp = fnew(p);
	if (fp == NULL)
		return ERR_PTR(-ENFILE);
	fp->f_type = DTYPE_DMABUF;
	fp->f_ops = &dmabufops;
	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
	dmabuf->priv = info->priv;
	dmabuf->ops = info->ops;
	dmabuf->size = info->size;
	dmabuf->file = fp;
	fp->f_data = dmabuf;
	INIT_LIST_HEAD(&dmabuf->attachments);
	return dmabuf;
}

struct dma_buf *
dma_buf_get(int fd)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return ERR_PTR(-EBADF);

	if (fp->f_type != DTYPE_DMABUF) {
		FRELE(fp, p);
		return ERR_PTR(-EINVAL);
	}

	return fp->f_data;
}

void
dma_buf_put(struct dma_buf *dmabuf)
{
	KASSERT(dmabuf);
	KASSERT(dmabuf->file);

	FRELE(dmabuf->file, curproc);
}

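/*
 * Allocate a file descriptor for the dma-buf, growing the descriptor
 * table on ENOSPC.  Returns the new fd or a negative errno.
 */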
int
dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp = dmabuf->file;
	int fd, cloexec, error;

	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;

	fdplock(fdp);
restart:
	if ((error = fdalloc(p, 0, &fd)) != 0) {
		if (error == ENOSPC) {
			fdexpand(p);
			goto restart;
		}
		fdpunlock(fdp);
		return -error;
	}

	fdinsert(fdp, fd, cloexec, fp);
	fdpunlock(fdp);

	return fd;
}

void
get_dma_buf(struct dma_buf *dmabuf)
{
	FREF(dmabuf->file);
}

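/*
 * Report the fastest link speed the device supports: PCIe 3.0 and
 * later devices set bits in the link capabilities 2 supported link
 * speeds vector; older devices only have the max link speed field
 * in link capabilities.
 */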
enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int pos;
	pcireg_t xcap, lnkcap = 0, lnkcap2 = 0;
	pcireg_t id;
	enum pci_bus_speed cap = PCI_SPEED_UNKNOWN;
	int bus, device, function;

	if (pdev == NULL)
		return PCI_SPEED_UNKNOWN;

	pc = pdev->pc;
	tag = pdev->tag;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCI_SPEED_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* we've been informed VIA and ServerWorks don't make the cut */
	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
		return PCI_SPEED_UNKNOWN;

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);

	lnkcap &= 0x0f;
	lnkcap2 &= 0xfe;

	if (lnkcap2) { /* PCIe 3.0 and later */
		if (lnkcap2 & 0x02)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap2 & 0x04)
			cap = PCIE_SPEED_5_0GT;
		if (lnkcap2 & 0x08)
			cap = PCIE_SPEED_8_0GT;
		if (lnkcap2 & 0x10)
			cap = PCIE_SPEED_16_0GT;
		if (lnkcap2 & 0x20)
			cap = PCIE_SPEED_32_0GT;
		if (lnkcap2 & 0x40)
			cap = PCIE_SPEED_64_0GT;
	} else {
		if (lnkcap & 0x01)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap & 0x02)
			cap = PCIE_SPEED_5_0GT;
	}

	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
	    lnkcap2);
	return cap;
}

enum pcie_link_width
pcie_get_width_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc = pdev->pc;
	pcitag_t tag = pdev->tag;
	int pos;
	pcireg_t lnkcap = 0;
	pcireg_t id;
	int bus, device, function;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCIE_LNK_WIDTH_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);

	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);

	if (lnkcap)
		return (lnkcap & 0x3f0) >> 4;
	return PCIE_LNK_WIDTH_UNKNOWN;
}

bool
pcie_aspm_enabled(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc = pdev->pc;
	pcitag_t tag = pdev->tag;
	int pos;
	pcireg_t lcsr;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return false;

	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
		return true;

	return false;
}

static wait_queue_head_t bit_waitq;
wait_queue_head_t var_waitq;
struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);

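/*
 * Sleep until the bit clears, serialized by wait_bit_mtx.  Returns 0
 * once the bit is clear, nonzero if the sleep failed or was
 * interrupted.
 */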
int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
		    INFSLP);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}

int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}

void
wake_up_bit(void *word, int bit)
{
	mtx_enter(&wait_bit_mtx);
	wakeup(word);
	mtx_leave(&wait_bit_mtx);
}

void
clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit(bit, word);
	wake_up_bit(word, bit);
}

wait_queue_head_t *
bit_waitqueue(void *word, int bit)
{
	/* XXX hash table of wait queues? */
	return &bit_waitq;
}

wait_queue_head_t *
__var_waitqueue(void *p)
{
	/* XXX hash table of wait queues? */
	return &bit_waitq;
}

struct workqueue_struct *system_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_long_wq;
struct taskq *taskletq;

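/*
 * The Linux "system" workqueues are backed by 4-thread IPL_HIGH
 * taskqs; tasklets get a single-threaded taskq of their own.
 */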
void
drm_linux_init(void)
{
	system_wq = (struct workqueue_struct *)
	    taskq_create("drmwq", 4, IPL_HIGH, 0);
	system_highpri_wq = (struct workqueue_struct *)
	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
	system_unbound_wq = (struct workqueue_struct *)
	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
	system_long_wq = (struct workqueue_struct *)
	    taskq_create("drmlwq", 4, IPL_HIGH, 0);

	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);

	init_waitqueue_head(&bit_waitq);
	init_waitqueue_head(&var_waitq);

	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
	    "idrpl", NULL);

	kmap_atomic_va =
	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
}

void
drm_linux_exit(void)
{
	pool_destroy(&idr_pool);

	taskq_destroy(taskletq);

	taskq_destroy((struct taskq *)system_long_wq);
	taskq_destroy((struct taskq *)system_unbound_wq);
	taskq_destroy((struct taskq *)system_highpri_wq);
	taskq_destroy((struct taskq *)system_wq);
}

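/* PCIe resizable BAR extended capability registers */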
#define PCIE_ECAP_RESIZE_BAR	0x15
#define RBCAP0			0x04
#define RBCTRL0			0x08
#define RBCTRL_BARINDEX_MASK	0x07
#define RBCTRL_BARSIZE_MASK	0x1f00
#define RBCTRL_BARSIZE_SHIFT	8

/* size in MB is 1 << nsize */
int
pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
{
	pcireg_t reg;
	uint32_t offset, capid;

	KASSERT(bar == 0);

	offset = PCI_PCIE_ECAP;

	/* search PCI Express Extended Capabilities */
	do {
		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
		capid = PCI_PCIE_ECAP_ID(reg);
		if (capid == PCIE_ECAP_RESIZE_BAR)
			break;
		offset = PCI_PCIE_ECAP_NEXT(reg);
	} while (capid != 0);

	if (capid == 0) {
		printf("%s: could not find resize bar cap!\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);

	if ((reg & (1 << (nsize + 4))) == 0) {
		printf("%s size not supported\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
		printf("%s BAR index not 0\n", __func__);
		return -EINVAL;
	}

	reg &= ~RBCTRL_BARSIZE_MASK;
	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;

	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);

	return 0;
}

TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);

int
register_shrinker(struct shrinker *shrinker, const char *format, ...)
{
	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
	return 0;
}

void
unregister_shrinker(struct shrinker *shrinker)
{
	TAILQ_REMOVE(&shrinkers, shrinker, next);
}

void
drmbackoff(long npages)
{
	struct shrink_control sc;
	struct shrinker *shrinker;
	u_long ret;

	shrinker = TAILQ_FIRST(&shrinkers);
	while (shrinker && npages > 0) {
		sc.nr_to_scan = npages;
		ret = shrinker->scan_objects(shrinker, &sc);
		npages -= ret;
		shrinker = TAILQ_NEXT(shrinker, next);
	}
}

void *
bitmap_zalloc(u_int n, gfp_t flags)
{
	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
}

void
bitmap_free(void *p)
{
	kfree(p);
}

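/*
 * Decrement *v; if it reaches zero, return 1 with the lock held,
 * otherwise return 0 without taking the lock.
 */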
int
atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
{
	if (atomic_add_unless(v, -1, 1))
		return 0;

	rw_enter_write(lock);
	if (atomic_dec_return(v) == 0)
		return 1;
	rw_exit_write(lock);
	return 0;
}

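/*
 * Linux log levels arrive as a "\001<digit>" prefix on the format
 * string; strip it and, unless DRMDEBUG is defined, drop messages of
 * KERN_INFO priority and below.
 */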
int
printk(const char *fmt, ...)
{
	int ret, level;
	va_list ap;

	if (fmt != NULL && *fmt == '\001') {
		level = fmt[1];
#ifndef DRMDEBUG
		if (level >= KERN_INFO[1] && level <= '9')
			return 0;
#endif
		fmt += 2;
	}

	va_start(ap, fmt);
	ret = vprintf(fmt, ap);
	va_end(ap);

	return ret;
}

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

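/*
 * Simplified interval tree: lookups are a linear scan of the rbtree
 * rather than an augmented-tree search.
 */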
struct interval_tree_node *
interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
    unsigned long last)
{
	struct interval_tree_node *node;
	struct rb_node *rb;

	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
		node = rb_entry(rb, typeof(*node), rb);
		if (LAST(node) >= start && START(node) <= last)
			return node;
	}
	return NULL;
}

void
interval_tree_remove(struct interval_tree_node *node,
    struct rb_root_cached *root)
{
	rb_erase_cached(&node->rb, root);
}

void
interval_tree_insert(struct interval_tree_node *node,
    struct rb_root_cached *root)
{
	struct rb_node **iter = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct interval_tree_node *iter_node;

	while (*iter) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct interval_tree_node, rb);

		if (node->start < iter_node->start)
			iter = &(*iter)->rb_left;
		else
			iter = &(*iter)->rb_right;
	}

	rb_link_node(&node->rb, parent, iter);
	rb_insert_color_cached(&node->rb, root, false);
}

int
syncfile_read(struct file *fp, struct uio *uio, int fflags)
{
	return ENXIO;
}

int
syncfile_write(struct file *fp, struct uio *uio, int fflags)
{
	return ENXIO;
}

int
syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return ENOTTY;
}

int
syncfile_kqfilter(struct file *fp, struct knote *kn)
{
	return EINVAL;
}

int
syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
{
	memset(st, 0, sizeof(*st));
	st->st_mode = S_IFIFO;	/* XXX */
	return 0;
}

int
syncfile_close(struct file *fp, struct proc *p)
{
	struct sync_file *sf = fp->f_data;

	dma_fence_put(sf->fence);
	fp->f_data = NULL;
	free(sf, M_DRM, sizeof(struct sync_file));
	return 0;
}

int
syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
	off_t newoff;

	if (*offset != 0)
		return EINVAL;

	switch (whence) {
	case SEEK_SET:
		newoff = 0;
		break;
	case SEEK_END:
		newoff = 0;
		break;
	default:
		return EINVAL;
	}
	mtx_enter(&fp->f_mtx);
	fp->f_offset = newoff;
	mtx_leave(&fp->f_mtx);
	*offset = newoff;
	return 0;
}

const struct fileops syncfileops = {
	.fo_read = syncfile_read,
	.fo_write = syncfile_write,
	.fo_ioctl = syncfile_ioctl,
	.fo_kqfilter = syncfile_kqfilter,
	.fo_stat = syncfile_stat,
	.fo_close = syncfile_close,
	.fo_seek = syncfile_seek,
};

void
fd_install(int fd, struct file *fp)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;

	if (fp->f_type != DTYPE_SYNC)
		return;

	fdplock(fdp);
	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
	fdinsert(fdp, fd, UF_EXCLOSE, fp);
	fdpunlock(fdp);
}

void
fput(struct file *fp)
{
	if (fp->f_type != DTYPE_SYNC)
		return;

	FRELE(fp, curproc);
}

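/*
 * Reserve a file descriptor without installing a file in it; callers
 * pair this with fd_install() or put_unused_fd().
 */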
int
get_unused_fd_flags(unsigned int flags)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	int error, fd;

	KASSERT((flags & O_CLOEXEC) != 0);

	fdplock(fdp);
retryalloc:
	if ((error = fdalloc(p, 0, &fd)) != 0) {
		if (error == ENOSPC) {
			fdexpand(p);
			goto retryalloc;
		}
		fdpunlock(fdp);
		return -1;
	}
	fdpunlock(fdp);

	return fd;
}

void
put_unused_fd(int fd)
{
	struct filedesc *fdp = curproc->p_fd;

	fdplock(fdp);
	fdremove(fdp, fd);
	fdpunlock(fdp);
}

struct dma_fence *
sync_file_get_fence(int fd)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct sync_file *sf;
	struct dma_fence *f;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return NULL;

	if (fp->f_type != DTYPE_SYNC) {
		FRELE(fp, p);
		return NULL;
	}
	sf = fp->f_data;
	f = dma_fence_get(sf->fence);
	FRELE(sf->file, p);
	return f;
}

struct sync_file *
sync_file_create(struct dma_fence *fence)
{
	struct proc *p = curproc;
	struct sync_file *sf;
	struct file *fp;

	fp = fnew(p);
	if (fp == NULL)
		return NULL;
	fp->f_type = DTYPE_SYNC;
	fp->f_ops = &syncfileops;
	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
	sf->file = fp;
	sf->fence = dma_fence_get(fence);
	fp->f_data = sf;
	return sf;
}

bool
drm_firmware_drivers_only(void)
{
	return false;
}

void *
memremap(phys_addr_t phys_addr, size_t size, int flags)
{
	STUB();
	return NULL;
}

void
memunmap(void *addr)
{
	STUB();
}

#include <linux/platform_device.h>

bus_dma_tag_t
dma_tag_lookup(struct device *dev)
{
	extern struct cfdriver drm_cd;
	struct drm_device *drm;
	int i;

	for (i = 0; i < drm_cd.cd_ndevs; i++) {
		drm = drm_cd.cd_devs[i];
		if (drm && drm->dev == dev)
			return drm->dmat;
	}

	return ((struct platform_device *)dev)->dmat;
}

LIST_HEAD(, drm_dmamem) dmamem_list = LIST_HEAD_INITIALIZER(dmamem_list);

void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    int gfp)
{
	bus_dma_tag_t dmat = dma_tag_lookup(dev);
	struct drm_dmamem *mem;

	mem = drm_dmamem_alloc(dmat, size, PAGE_SIZE, 1, size,
	    BUS_DMA_COHERENT, 0);
	if (mem == NULL)
		return NULL;
	*dma_handle = mem->map->dm_segs[0].ds_addr;
	LIST_INSERT_HEAD(&dmamem_list, mem, next);
	return mem->kva;
}

void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
    dma_addr_t dma_handle)
{
	bus_dma_tag_t dmat = dma_tag_lookup(dev);
	struct drm_dmamem *mem;

	LIST_FOREACH(mem, &dmamem_list, next) {
		if (mem->kva == cpu_addr)
			break;
	}
	KASSERT(mem);
	KASSERT(mem->size == size);
	KASSERT(mem->map->dm_segs[0].ds_addr == dma_handle);

	LIST_REMOVE(mem, next);
	drm_dmamem_free(dmat, mem);
}

int
dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
    dma_addr_t dma_addr, size_t size)
{
	paddr_t pa;
	int ret;

	if (!pmap_extract(pmap_kernel(), (vaddr_t)cpu_addr, &pa))
		return -EINVAL;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		return ret;

	sg_set_page(sgt->sgl, PHYS_TO_VM_PAGE(pa), size, 0);
	return 0;
}

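/*
 * Map a raw physical address range for device access with a
 * single-segment bus_dma map.  Returns DMA_MAPPING_ERROR on failure.
 */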
dma_addr_t
dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
    enum dma_data_direction dir, u_long attr)
{
	bus_dma_tag_t dmat = dma_tag_lookup(dev);
	bus_dmamap_t map;
	bus_dma_segment_t seg;

	if (bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &map))
		return DMA_MAPPING_ERROR;
	seg.ds_addr = phys_addr;
	seg.ds_len = size;
	if (bus_dmamap_load_raw(dmat, map, &seg, 1, size, BUS_DMA_WAITOK)) {
		bus_dmamap_destroy(dmat, map);
		return DMA_MAPPING_ERROR;
	}

	return map->dm_segs[0].ds_addr;
}

#ifdef BUS_DMA_FIXED

#include <linux/iommu.h>

size_t
iommu_map_sgtable(struct iommu_domain *domain, u_long iova,
    struct sg_table *sgt, int prot)
{
	bus_dma_segment_t seg;
	int error;

	error = bus_dmamap_create(domain->dmat, sgt->sgl->length, 1,
	    sgt->sgl->length, 0, BUS_DMA_WAITOK, &sgt->dmamap);
	if (error)
		return -ENOMEM;

	sgt->dmamap->dm_segs[0].ds_addr = iova;
	sgt->dmamap->dm_segs[0].ds_len = sgt->sgl->length;
	sgt->dmamap->dm_nsegs = 1;
	seg.ds_addr = VM_PAGE_TO_PHYS(sgt->sgl->__page);
	seg.ds_len = sgt->sgl->length;
	error = bus_dmamap_load_raw(domain->dmat, sgt->dmamap, &seg, 1,
	    sgt->sgl->length, BUS_DMA_WAITOK | BUS_DMA_FIXED);
	if (error)
		return -ENOMEM;

	return sg_dma_len(sgt->sgl);
}

size_t
iommu_unmap(struct iommu_domain *domain, u_long iova, size_t size)
{
	STUB();
	return 0;
}

struct iommu_domain *
iommu_get_domain_for_dev(struct device *dev)
{
	STUB();
	return NULL;
}

phys_addr_t
iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	STUB();
	return 0;
}

struct iommu_domain *
iommu_domain_alloc(struct bus_type *type)
{
	return malloc(sizeof(struct iommu_domain), M_DEVBUF, M_WAITOK | M_ZERO);
}

int
iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct platform_device *pdev = (struct platform_device *)dev;

	domain->dmat = pdev->dmat;
	return 0;
}

#endif

#include <linux/component.h>

struct component {
	struct device *dev;
	struct device *adev;
	const struct component_ops *ops;
	SLIST_ENTRY(component) next;
};

SLIST_HEAD(,component) component_list = SLIST_HEAD_INITIALIZER(component_list);

int
component_add(struct device *dev, const struct component_ops *ops)
{
	struct component *component;

	component = malloc(sizeof(*component), M_DEVBUF, M_WAITOK | M_ZERO);
	component->dev = dev;
	component->ops = ops;
	SLIST_INSERT_HEAD(&component_list, component, next);
	return 0;
}

int
component_add_typed(struct device *dev, const struct component_ops *ops,
    int type)
{
	return component_add(dev, ops);
}

int
component_bind_all(struct device *dev, void *data)
{
	struct component *component;
	int ret = 0;

	SLIST_FOREACH(component, &component_list, next) {
		if (component->adev == dev) {
			ret = component->ops->bind(component->dev, NULL, data);
			if (ret)
				break;
		}
	}

	return ret;
}

struct component_match_entry {
	int (*compare)(struct device *, void *);
	void *data;
};

struct component_match {
	struct component_match_entry match[4];
	int nmatches;
};

int
component_master_add_with_match(struct device *dev,
    const struct component_master_ops *ops, struct component_match *match)
{
	struct component *component;
	int found = 0;
	int i, ret;

	SLIST_FOREACH(component, &component_list, next) {
		for (i = 0; i < match->nmatches; i++) {
			struct component_match_entry *m = &match->match[i];
			if (m->compare(component->dev, m->data)) {
				component->adev = dev;
				found = 1;
				break;
			}
		}
	}

	if (found) {
		ret = ops->bind(dev);
		if (ret)
			return ret;
	}

	return 0;
}

#ifdef __HAVE_FDT

#include <linux/platform_device.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>
#include <machine/fdt.h>

LIST_HEAD(, platform_device) pdev_list = LIST_HEAD_INITIALIZER(pdev_list);

void
platform_device_register(struct platform_device *pdev)
{
	int i;

	pdev->num_resources = pdev->faa->fa_nreg;
	if (pdev->faa->fa_nreg > 0) {
		pdev->resource = mallocarray(pdev->faa->fa_nreg,
		    sizeof(*pdev->resource), M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < pdev->faa->fa_nreg; i++) {
			pdev->resource[i].start = pdev->faa->fa_reg[i].addr;
			pdev->resource[i].end = pdev->faa->fa_reg[i].addr +
			    pdev->faa->fa_reg[i].size - 1;
		}
	}

	pdev->parent = pdev->dev.dv_parent;
	pdev->node = pdev->faa->fa_node;
	pdev->iot = pdev->faa->fa_iot;
	pdev->dmat = pdev->faa->fa_dmat;
	LIST_INSERT_HEAD(&pdev_list, pdev, next);
}

struct resource *
platform_get_resource(struct platform_device *pdev, u_int type, u_int num)
{
	KASSERT(num < pdev->num_resources);
	return &pdev->resource[num];
}

void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
    const char *name)
{
	bus_space_handle_t ioh;
	int err, idx;

	idx = OF_getindex(pdev->node, name, "reg-names");
	if (idx == -1 || idx >= pdev->num_resources)
		return ERR_PTR(-EINVAL);

	err = bus_space_map(pdev->iot, pdev->resource[idx].start,
	    pdev->resource[idx].end - pdev->resource[idx].start + 1,
	    BUS_SPACE_MAP_LINEAR, &ioh);
	if (err)
		return ERR_PTR(-err);

	return bus_space_vaddr(pdev->iot, ioh);
}

#include <dev/ofw/ofw_clock.h>
#include <linux/clk.h>

struct clk *
devm_clk_get(struct device *dev, const char *name)
{
	struct platform_device *pdev = (struct platform_device *)dev;
	struct clk *clk;

	clk = malloc(sizeof(*clk), M_DEVBUF, M_WAITOK);
	clk->freq = clock_get_frequency(pdev->node, name);
	return clk;
}

u_long
clk_get_rate(struct clk *clk)
{
	return clk->freq;
}

#include <linux/gpio/consumer.h>
#include <dev/ofw/ofw_gpio.h>

struct gpio_desc {
	uint32_t gpios[4];
};

struct gpio_desc *
devm_gpiod_get_optional(struct device *dev, const char *name, int flags)
{
	struct platform_device *pdev = (struct platform_device *)dev;
	struct gpio_desc *desc;
	char fullname[128];
	int len;

	snprintf(fullname, sizeof(fullname), "%s-gpios", name);

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);
	len = OF_getpropintarray(pdev->node, fullname, desc->gpios,
	    sizeof(desc->gpios));
	KASSERT(len <= sizeof(desc->gpios));
	if (len < 0) {
		free(desc, M_DEVBUF, sizeof(*desc));
		return NULL;
	}

	switch (flags) {
	case GPIOD_IN:
		gpio_controller_config_pin(desc->gpios, GPIO_CONFIG_INPUT);
		break;
	case GPIOD_OUT_HIGH:
		gpio_controller_config_pin(desc->gpios, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(desc->gpios, 1);
		break;
	default:
		panic("%s: unimplemented flags 0x%x", __func__, flags);
	}

	return desc;
}

int
gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
	return gpio_controller_get_pin(((struct gpio_desc *)desc)->gpios);
}

struct phy {
	int node;
	const char *name;
};

struct phy *
devm_phy_optional_get(struct device *dev, const char *name)
{
	struct platform_device *pdev = (struct platform_device *)dev;
	struct phy *phy;
	int idx;

	idx = OF_getindex(pdev->node, name, "phy-names");
	if (idx == -1)
		return NULL;

	phy = malloc(sizeof(*phy), M_DEVBUF, M_WAITOK);
	phy->node = pdev->node;
	phy->name = name;

	return phy;
}

struct bus_type platform_bus_type;

#include <dev/ofw/ofw_misc.h>

#include <linux/of.h>
#include <linux/platform_device.h>

struct device_node *
__of_devnode(void *arg)
{
	struct device *dev = container_of(arg, struct device, of_node);
	struct platform_device *pdev = (struct platform_device *)dev;

	return (struct device_node *)(uintptr_t)pdev->node;
}

int
__of_device_is_compatible(struct device_node *np, const char *compatible)
{
	return OF_is_compatible((uintptr_t)np, compatible);
}

int
__of_property_present(struct device_node *np, const char *propname)
{
	return OF_getpropbool((uintptr_t)np, (char *)propname);
}

int
__of_property_read_variable_u32_array(struct device_node *np,
    const char *propname, uint32_t *out_values, size_t sz_min, size_t sz_max)
{
	int len;

	len = OF_getpropintarray((uintptr_t)np, (char *)propname, out_values,
	    sz_max * sizeof(*out_values));
	if (len < 0)
		return -EINVAL;
	if (len == 0)
		return -ENODATA;
	if (len < sz_min * sizeof(*out_values) ||
	    len > sz_max * sizeof(*out_values))
		return -EOVERFLOW;
	if (sz_min == 1 && sz_max == 1)
		return 0;
	return len / sizeof(*out_values);
}

int
__of_property_read_variable_u64_array(struct device_node *np,
    const char *propname, uint64_t *out_values, size_t sz_min, size_t sz_max)
{
	int len;

	len = OF_getpropint64array((uintptr_t)np, (char *)propname, out_values,
	    sz_max * sizeof(*out_values));
	if (len < 0)
		return -EINVAL;
	if (len == 0)
		return -ENODATA;
	if (len < sz_min * sizeof(*out_values) ||
	    len > sz_max * sizeof(*out_values))
		return -EOVERFLOW;
	if (sz_min == 1 && sz_max == 1)
		return 0;
	return len / sizeof(*out_values);
}

int
__of_property_match_string(struct device_node *np,
    const char *propname, const char *str)
{
	int idx;

	idx = OF_getindex((uintptr_t)np, str, propname);
	if (idx == -1)
		return -ENODATA;
	return idx;
}

struct device_node *
__of_parse_phandle(struct device_node *np, const char *propname, int idx)
{
	uint32_t phandles[16] = {};
	int len, node;

	len = OF_getpropintarray((uintptr_t)np, (char *)propname, phandles,
	    sizeof(phandles));
	if (len < (idx + 1) * sizeof(uint32_t))
		return NULL;

	node = OF_getnodebyphandle(phandles[idx]);
	if (node == 0)
		return NULL;

	return (struct device_node *)(uintptr_t)node;
}

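/*
 * Simplified phandle-with-args lookup: argument cells are taken from
 * the start of the property, which only matches the idx == 0 case.
 */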
int
__of_parse_phandle_with_args(struct device_node *np, const char *propname,
    const char *cellsname, int idx, struct of_phandle_args *args)
{
	uint32_t phandles[16] = {};
	int i, len, node;

	len = OF_getpropintarray((uintptr_t)np, (char *)propname, phandles,
	    sizeof(phandles));
	if (len < (idx + 1) * sizeof(uint32_t))
		return -ENOENT;

	node = OF_getnodebyphandle(phandles[idx]);
	if (node == 0)
		return -ENOENT;

	args->np = (struct device_node *)(uintptr_t)node;
	args->args_count = OF_getpropint(node, (char *)cellsname, 0);
	for (i = 0; i < args->args_count; i++)
		args->args[i] = phandles[i + 1];

	return 0;
}

int
of_address_to_resource(struct device_node *np, int idx, struct resource *res)
{
	uint64_t reg[16] = {};
	int len;

	KASSERT(idx < 8);

	len = OF_getpropint64array((uintptr_t)np, "reg", reg, sizeof(reg));
	if (len < 0 || idx >= (len / (2 * sizeof(uint64_t))))
		return -EINVAL;

	res->start = reg[2 * idx];
	res->end = reg[2 * idx] + reg[2 * idx + 1] - 1;

	return 0;
}

static int
next_node(int node)
{
	int peer = OF_peer(node);

	while (node && !peer) {
		node = OF_parent(node);
		if (node)
			peer = OF_peer(node);
	}

	return peer;
}

static int
find_matching_node(int node, const struct of_device_id *id)
{
	int child, match;
	int i;

	for (child = OF_child(node); child; child = OF_peer(child)) {
		match = find_matching_node(child, id);
		if (match)
			return match;
	}

	for (i = 0; id[i].compatible; i++) {
		if (OF_is_compatible(node, id[i].compatible))
			return node;
	}

	return 0;
}

struct device_node *
__matching_node(struct device_node *np, const struct of_device_id *id)
{
	int node = OF_peer(0);
	int match;

	if (np)
		node = next_node((uintptr_t)np);
	while (node) {
		match = find_matching_node(node, id);
		if (match)
			return (struct device_node *)(uintptr_t)match;
		node = next_node(node);
	}

	return NULL;
}

struct platform_device *
of_platform_device_create(struct device_node *np, const char *bus_id,
    struct device *parent)
{
	struct platform_device *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->node = (intptr_t)np;
	pdev->parent = parent;

	LIST_INSERT_HEAD(&pdev_list, pdev, next);

	return pdev;
}

struct platform_device *
of_find_device_by_node(struct device_node *np)
{
	struct platform_device *pdev;

	LIST_FOREACH(pdev, &pdev_list, next) {
		if (pdev->node == (intptr_t)np)
			return pdev;
	}

	return NULL;
}

int
of_device_is_available(struct device_node *np)
{
	char status[32];

	if (OF_getprop((uintptr_t)np, "status", status, sizeof(status)) > 0 &&
	    strcmp(status, "disabled") == 0)
		return 0;

	return 1;
}

int
of_dma_configure(struct device *dev, struct device_node *np, int force_dma)
{
	struct platform_device *pdev = (struct platform_device *)dev;
	bus_dma_tag_t dmat = dma_tag_lookup(pdev->parent);

	pdev->dmat = iommu_device_map(pdev->node, dmat);
	return 0;
}

struct device_node *
__of_get_compatible_child(void *p, const char *compat)
{
	struct device *dev = container_of(p, struct device, of_node);
	struct platform_device *pdev = (struct platform_device *)dev;
	int child;

	for (child = OF_child(pdev->node); child; child = OF_peer(child)) {
		if (OF_is_compatible(child, compat))
			return (struct device_node *)(uintptr_t)child;
	}
	return NULL;
}

struct device_node *
__of_get_child_by_name(void *p, const char *name)
{
	struct device *dev = container_of(p, struct device, of_node);
	struct platform_device *pdev = (struct platform_device *)dev;
	int child;

	child = OF_getnodebyname(pdev->node, name);
	if (child == 0)
		return NULL;
	return (struct device_node *)(uintptr_t)child;
}

int
component_compare_of(struct device *dev, void *data)
{
	struct platform_device *pdev = (struct platform_device *)dev;

	return (pdev->node == (intptr_t)data);
}

void
drm_of_component_match_add(struct device *master,
    struct component_match **matchptr,
    int (*compare)(struct device *, void *),
    struct device_node *np)
{
	struct component_match *match = *matchptr;

	if (match == NULL) {
		match = malloc(sizeof(struct component_match),
		    M_DEVBUF, M_WAITOK | M_ZERO);
		*matchptr = match;
	}

	KASSERT(match->nmatches < nitems(match->match));
	match->match[match->nmatches].compare = compare;
	match->match[match->nmatches].data = np;
	match->nmatches++;
}

#endif