1 /* $OpenBSD: drm_linux.c,v 1.114 2024/06/13 18:05:54 kettenis Exp $ */
2 /*
3 * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4 * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30
31 #include <dev/pci/ppbreg.h>
32
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/dma-fence-chain.h>
39 #include <linux/interrupt.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/scatterlist.h>
43 #include <linux/i2c.h>
44 #include <linux/pci.h>
45 #include <linux/notifier.h>
46 #include <linux/backlight.h>
47 #include <linux/shrinker.h>
48 #include <linux/fb.h>
49 #include <linux/xarray.h>
50 #include <linux/interval_tree.h>
51 #include <linux/kthread.h>
52 #include <linux/processor.h>
53 #include <linux/sync_file.h>
54
55 #include <drm/drm_device.h>
56 #include <drm/drm_connector.h>
57 #include <drm/drm_print.h>
58
59 #if defined(__amd64__) || defined(__i386__)
60 #include "bios.h"
61 #endif
62
/* allowed to sleep */
void
tasklet_unlock_wait(struct tasklet_struct *ts)
{
	/* Busy-wait until the tasklet's RUN bit clears. */
	while (test_bit(TASKLET_STATE_RUN, &ts->state))
		cpu_relax();
}

/* must not sleep */
void
tasklet_unlock_spin_wait(struct tasklet_struct *ts)
{
	/* Same spin as above; kept separate to mirror the Linux API. */
	while (test_bit(TASKLET_STATE_RUN, &ts->state))
		cpu_relax();
}

/*
 * Taskq glue that executes one scheduled tasklet: clear SCHED, then
 * run the callback unless the tasklet is disabled (count != 0) or is
 * already running elsewhere (trylock fails).
 */
void
tasklet_run(void *arg)
{
	struct tasklet_struct *ts = arg;

	clear_bit(TASKLET_STATE_SCHED, &ts->state);
	if (tasklet_trylock(ts)) {
		if (!atomic_read(&ts->count)) {
			if (ts->use_callback)
				ts->callback(ts);
			else
				ts->func(ts->data);
		}
		tasklet_unlock(ts);
	}
}

/* 32 bit powerpc lacks 64 bit atomics */
#if defined(__powerpc__) && !defined(__powerpc64__)
struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
#endif
100
/*
 * Linux set_current_state(): put curproc on the sleep queue in
 * preparation for a later schedule_timeout().  The Linux task state
 * value doubles as the OpenBSD sleep priority here.
 */
void
set_current_state(int state)
{
	int prio = state;

	KASSERT(state != TASK_RUNNING);
	/* check if already on the sleep list */
	if (curproc->p_wchan != NULL)
		return;
	sleep_setup(curproc, prio, "schto");
}

/*
 * Linux __set_current_state(TASK_RUNNING): abort a prepared sleep
 * and put the process back into the running state.
 */
void
__set_current_state(int state)
{
	struct proc *p = curproc;

	KASSERT(state == TASK_RUNNING);
	SCHED_LOCK();
	unsleep(p);
	p->p_stat = SONPROC;
	atomic_clearbits_int(&p->p_flag, P_WSLEEP);
	SCHED_UNLOCK();
}

/* Sleep without a timeout; requires a prior set_current_state(). */
void
schedule(void)
{
	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}
131
132 long
schedule_timeout(long timeout)133 schedule_timeout(long timeout)
134 {
135 unsigned long deadline;
136 int timo = 0;
137
138 KASSERT(!cold);
139
140 if (timeout != MAX_SCHEDULE_TIMEOUT)
141 timo = timeout;
142 if (timeout != MAX_SCHEDULE_TIMEOUT)
143 deadline = jiffies + timeout;
144 sleep_finish(timo, timeout > 0);
145 if (timeout != MAX_SCHEDULE_TIMEOUT)
146 timeout = deadline - jiffies;
147
148 return timeout > 0 ? timeout : 0;
149 }
150
/*
 * Sleep for "timeout" ticks, not interruptible by signals.
 * NOTE(review): always returns 0, while Linux returns the remaining
 * time — confirm no caller relies on the residue.
 */
long
schedule_timeout_uninterruptible(long timeout)
{
	tsleep(curproc, PWAIT, "schtou", timeout);
	return 0;
}

/*
 * Wake a sleeping process; returns non-zero if it was actually
 * asleep (wakeup_proc()'s result).
 */
int
wake_up_process(struct proc *p)
{
	int rv;

	SCHED_LOCK();
	rv = wakeup_proc(p, 0);
	SCHED_UNLOCK();
	return rv;
}

/*
 * Default wait-queue callback: wake the waiter stored in the entry's
 * private field and unlink the entry.  "mode", "sync" and "key" are
 * accepted for API compatibility but ignored.
 */
int
autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
    int sync, void *key)
{
	if (wqe->private)
		wake_up_process(wqe->private);
	list_del_init(&wqe->entry);
	return 0;
}
178
/*
 * Queue "wqe" on "wqh" (unless already queued) and prepare the
 * current process to sleep in the given state.
 */
void
prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
{
	mtx_enter(&wqh->lock);
	if (list_empty(&wqe->entry))
		__add_wait_queue(wqh, wqe);
	mtx_leave(&wqh->lock);

	set_current_state(state);
}

/* Undo prepare_to_wait(): back to TASK_RUNNING and off the queue. */
void
finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
{
	__set_current_state(TASK_RUNNING);

	mtx_enter(&wqh->lock);
	if (!list_empty(&wqe->entry))
		list_del_init(&wqe->entry);
	mtx_leave(&wqh->lock);
}
200
201 void
flush_workqueue(struct workqueue_struct * wq)202 flush_workqueue(struct workqueue_struct *wq)
203 {
204 if (cold)
205 return;
206
207 if (wq)
208 taskq_barrier((struct taskq *)wq);
209 }
210
/*
 * Wait for pending work on the item's taskq to complete.
 * NOTE(review): always returns false, whereas Linux returns true
 * when work was pending — confirm callers ignore the result.
 */
bool
flush_work(struct work_struct *work)
{
	if (cold)
		return false;

	if (work->tq)
		taskq_barrier(work->tq);
	return false;
}

/*
 * Wait for a delayed work item: poll until its timeout has fired
 * (returning true if we had to wait at all), then barrier the taskq
 * so the task body has also finished.
 */
bool
flush_delayed_work(struct delayed_work *dwork)
{
	bool ret = false;

	if (cold)
		return false;

	while (timeout_pending(&dwork->to)) {
		tsleep(dwork, PWAIT, "fldwto", 1);
		ret = true;
	}

	if (dwork->tq)
		taskq_barrier(dwork->tq);
	return ret;
}
239
/*
 * Wrapper mapping Linux kthreads onto OpenBSD kernel threads, kept
 * on a global list so park/stop can find a thread by proc pointer.
 */
struct kthread {
	int (*func)(void *);
	void *data;
	struct proc *proc;
	volatile u_int flags;
#define KTHREAD_SHOULDSTOP 0x0000001	/* kthread_stop() requested */
#define KTHREAD_STOPPED 0x0000002	/* thread function has returned */
#define KTHREAD_SHOULDPARK 0x0000004	/* kthread_park() requested */
#define KTHREAD_PARKED 0x0000008	/* thread is parked in parkme() */
	LIST_ENTRY(kthread) next;
};

LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);

/* Thread trampoline: run the payload, flag completion, exit. */
void
kthread_func(void *arg)
{
	struct kthread *thread = arg;
	int ret;

	ret = thread->func(thread->data);
	thread->flags |= KTHREAD_STOPPED;
	wakeup(thread);
	kthread_exit(ret);
}

/*
 * Create and start a kernel thread running func(data).  Returns the
 * new proc, or ERR_PTR(-ENOMEM) when thread creation fails.
 */
struct proc *
kthread_run(int (*func)(void *), void *data, const char *name)
{
	struct kthread *thread;

	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
	thread->func = func;
	thread->data = data;
	thread->flags = 0;

	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
		free(thread, M_DRM, sizeof(*thread));
		return ERR_PTR(-ENOMEM);
	}

	LIST_INSERT_HEAD(&kthread_list, thread, next);
	return thread->proc;
}
284
/*
 * Create a worker backed by a single-threaded IPL_HIGH taskq.  The
 * printf-style name is truncated to MAXCOMLEN.  "flags" is accepted
 * for Linux API compatibility but ignored.
 * NOTE(review): the taskq_create() result is not checked for NULL —
 * confirm callers tolerate a worker whose tq is NULL.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char *fmt, ...)
{
	char name[MAXCOMLEN+1];
	va_list ap;

	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
	va_start(ap, fmt);
	vsnprintf(name, sizeof(name), fmt, ap);
	va_end(ap);
	w->tq = taskq_create(name, 1, IPL_HIGH, 0);

	return w;
}

/* Tear down the worker's taskq and free the wrapper. */
void
kthread_destroy_worker(struct kthread_worker *worker)
{
	taskq_destroy(worker->tq);
	free(worker, M_DRM, sizeof(*worker));

}

/* Bind "func" to the work item; the owning taskq is set on queue. */
void
kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
{
	work->tq = NULL;
	task_set(&work->task, (void (*)(void *))func, work);
}

/* Queue the work on the worker's taskq; false if already queued. */
bool
kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
{
	work->tq = worker->tq;
	return task_add(work->tq, &work->task);
}

/*
 * Remove the work if still pending; true if it was de-queued.
 * NOTE(review): does not wait for an already-running instance,
 * despite the _sync name.
 */
bool
kthread_cancel_work_sync(struct kthread_work *work)
{
	return task_del(work->tq, &work->task);
}

/* Wait until any queued instance of the work has finished. */
void
kthread_flush_work(struct kthread_work *work)
{
	if (cold)
		return;

	if (work->tq)
		taskq_barrier(work->tq);
}

/* Wait until all work queued on the worker has finished. */
void
kthread_flush_worker(struct kthread_worker *worker)
{
	if (cold)
		return;

	if (worker->tq)
		taskq_barrier(worker->tq);
}
347
/*
 * Map an OpenBSD proc back to its kthread wrapper.  KASSERTs if the
 * proc was not created via kthread_run().
 */
struct kthread *
kthread_lookup(struct proc *p)
{
	struct kthread *thread;

	LIST_FOREACH(thread, &kthread_list, next) {
		if (thread->proc == p)
			break;
	}
	KASSERT(thread);

	return thread;
}

/* Non-zero when someone has asked the current thread to park. */
int
kthread_should_park(void)
{
	struct kthread *thread = kthread_lookup(curproc);
	return (thread->flags & KTHREAD_SHOULDPARK);
}

/*
 * Called by the thread itself: sleep in the PARKED state until
 * kthread_unpark() clears SHOULDPARK, waking the parker each time
 * PARKED is set.
 */
void
kthread_parkme(void)
{
	struct kthread *thread = kthread_lookup(curproc);

	while (thread->flags & KTHREAD_SHOULDPARK) {
		thread->flags |= KTHREAD_PARKED;
		wakeup(thread);
		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
		thread->flags &= ~KTHREAD_PARKED;
	}
}

/*
 * Ask thread "p" to park and wait until it acknowledges by setting
 * KTHREAD_PARKED (see kthread_parkme()).
 */
void
kthread_park(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	while ((thread->flags & KTHREAD_PARKED) == 0) {
		thread->flags |= KTHREAD_SHOULDPARK;
		wake_up_process(thread->proc);
		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
	}
}

/* Release a parked thread. */
void
kthread_unpark(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	thread->flags &= ~KTHREAD_SHOULDPARK;
	wakeup(thread);
}

/* Non-zero when someone has asked the current thread to stop. */
int
kthread_should_stop(void)
{
	struct kthread *thread = kthread_lookup(curproc);
	return (thread->flags & KTHREAD_SHOULDSTOP);
}

/*
 * Ask thread "p" to stop (its func must poll kthread_should_stop()
 * and return), wait until it exits, then reclaim the wrapper.
 */
void
kthread_stop(struct proc *p)
{
	struct kthread *thread = kthread_lookup(p);

	while ((thread->flags & KTHREAD_STOPPED) == 0) {
		thread->flags |= KTHREAD_SHOULDSTOP;
		kthread_unpark(p);
		wake_up_process(thread->proc);
		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
	}
	LIST_REMOVE(thread, next);
	free(thread, M_DRM, sizeof(*thread));
}
424
#if NBIOS > 0
extern char smbios_board_vendor[];
extern char smbios_board_prod[];
extern char smbios_board_serial[];
#endif

/*
 * Compare a single DMI field against "str".  System vendor/product/
 * version come from the hw_* globals; board fields use SMBIOS data
 * when bios(4) is configured, otherwise fall back to hw_vendor and
 * hw_prod.  DMI_NONE and unknown slots never match.
 */
bool
dmi_match(int slot, const char *str)
{
	switch (slot) {
	case DMI_SYS_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_PRODUCT_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
	case DMI_PRODUCT_VERSION:
		if (hw_ver != NULL &&
		    !strcmp(hw_ver, str))
			return true;
		break;
#if NBIOS > 0
	case DMI_BOARD_VENDOR:
		if (strcmp(smbios_board_vendor, str) == 0)
			return true;
		break;
	case DMI_BOARD_NAME:
		if (strcmp(smbios_board_prod, str) == 0)
			return true;
		break;
	case DMI_BOARD_SERIAL:
		if (strcmp(smbios_board_serial, str) == 0)
			return true;
		break;
#else
	case DMI_BOARD_VENDOR:
		if (hw_vendor != NULL &&
		    !strcmp(hw_vendor, str))
			return true;
		break;
	case DMI_BOARD_NAME:
		if (hw_prod != NULL &&
		    !strcmp(hw_prod, str))
			return true;
		break;
#endif
	case DMI_NONE:
	default:
		return false;
	}

	return false;
}
482
483 static bool
dmi_found(const struct dmi_system_id * dsi)484 dmi_found(const struct dmi_system_id *dsi)
485 {
486 int i, slot;
487
488 for (i = 0; i < nitems(dsi->matches); i++) {
489 slot = dsi->matches[i].slot;
490 if (slot == DMI_NONE)
491 break;
492 if (!dmi_match(slot, dsi->matches[i].substr))
493 return false;
494 }
495
496 return true;
497 }
498
499 const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id * sysid)500 dmi_first_match(const struct dmi_system_id *sysid)
501 {
502 const struct dmi_system_id *dsi;
503
504 for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
505 if (dmi_found(dsi))
506 return dsi;
507 }
508
509 return NULL;
510 }
511
#if NBIOS > 0
extern char smbios_bios_date[];
extern char smbios_bios_version[];
#endif

/*
 * Return the firmware string for "slot"; only the BIOS date and
 * version are wired up.  Returns NULL when bios(4) is absent or the
 * slot is unhandled (which is also logged to the console).
 */
const char *
dmi_get_system_info(int slot)
{
#if NBIOS > 0
	switch (slot) {
	case DMI_BIOS_DATE:
		return smbios_bios_date;
	case DMI_BIOS_VERSION:
		return smbios_bios_version;
	default:
		printf("%s slot %d not handled\n", __func__, slot);
	}
#endif
	return NULL;
}
532
533 int
dmi_check_system(const struct dmi_system_id * sysid)534 dmi_check_system(const struct dmi_system_id *sysid)
535 {
536 const struct dmi_system_id *dsi;
537 int num = 0;
538
539 for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
540 if (dmi_found(dsi)) {
541 num++;
542 if (dsi->callback && dsi->callback(dsi))
543 break;
544 }
545 }
546 return (num);
547 }
548
/*
 * Allocate 2^order physically contiguous pages (uvm_pglistalloc()
 * with nsegs = 1).  GFP bits map onto UVM flags; __GFP_DMA32
 * restricts the allocation to the dma_constraint range.  Returns the
 * first page of the run, or NULL on failure.
 */
struct vm_page *
alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	struct uvm_constraint_range *constraint = &no_constraint;
	struct pglist mlist;

	if (gfp_mask & M_CANFAIL)
		flags |= UVM_PLA_FAILOK;
	if (gfp_mask & M_ZERO)
		flags |= UVM_PLA_ZERO;
	if (gfp_mask & __GFP_DMA32)
		constraint = &dma_constraint;

	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
		return NULL;
	return TAILQ_FIRST(&mlist);
}

/*
 * Free 2^order pages starting at "page"; assumes the vm_page structs
 * are adjacent, as produced by alloc_pages().
 */
void
__free_pages(struct vm_page *page, unsigned int order)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < (1 << order); i++)
		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
	uvm_pglistfree(&mlist);
}

/* Free every page held by the pagevec and reset it to empty. */
void
__pagevec_release(struct pagevec *pvec)
{
	struct pglist mlist;
	int i;

	TAILQ_INIT(&mlist);
	for (i = 0; i < pvec->nr; i++)
		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
	uvm_pglistfree(&mlist);
	pagevec_reinit(pvec);
}
594
/* VA allocation mode used by kmap() when there is no direct map. */
static struct kmem_va_mode kv_physwait = {
	.kv_map = &phys_map,
	.kv_wait = 1,
};

/*
 * Map one page into kernel virtual address space.  Uses the pmap
 * direct map when available; otherwise takes a VA from phys_map
 * (may sleep) and enters a temporary mapping.
 */
void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

/* Undo kmap(): tear down the mapping and release the VA. */
void
kunmap_va(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
#endif
}

/* Single reserved VA slot backing kmap_atomic_prot(). */
vaddr_t kmap_atomic_va;
int kmap_atomic_inuse;

/*
 * Map one page at the reserved atomic VA, OR-ing the extra pmap
 * flags in "prot" into the physical address.  Only one mapping may
 * exist at a time (enforced via kmap_atomic_inuse).
 */
void *
kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
{
	KASSERT(!kmap_atomic_inuse);

	kmap_atomic_inuse = 1;
	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
	    PROT_READ | PROT_WRITE);
	return (void *)kmap_atomic_va;
}

/* Release the atomic mapping slot. */
void
kunmap_atomic(void *addr)
{
	KASSERT(kmap_atomic_inuse);

	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
	kmap_atomic_inuse = 0;
}
651
/*
 * Map "npages" pages contiguously into kernel VA space.  "prot"
 * carries extra OpenBSD pmap bits OR'ed into the physical address;
 * "flags" is accepted for API compatibility but ignored.  Returns
 * NULL when no VA is available (allocation does not sleep).
 */
void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
    pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
	    &kd_nowait);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

/*
 * vmap() variant taking raw values instead of vm_page pointers.
 * NOTE(review): round_page(pfns[i]) treats the entries as physical
 * addresses, not page frame numbers as the Linux name implies —
 * confirm against the callers' expectations.
 */
void *
vmap_pfn(unsigned long *pfns, unsigned int npfn, pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = (vaddr_t)km_alloc(PAGE_SIZE * npfn, &kv_any, &kp_none,
	    &kd_nowait);
	if (va == 0)
		return NULL;
	for (i = 0; i < npfn; i++) {
		pa = round_page(pfns[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

/* Remove a vmap()/vmap_pfn() mapping and free its VA range. */
void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	km_free((void *)va, size, &kv_any, &kp_none);
}
706
707 bool
is_vmalloc_addr(const void * p)708 is_vmalloc_addr(const void *p)
709 {
710 vaddr_t min, max, addr;
711
712 min = vm_map_min(kernel_map);
713 max = vm_map_max(kernel_map);
714 addr = (vaddr_t)p;
715
716 if (addr >= min && addr <= max)
717 return true;
718 else
719 return false;
720 }
721
/*
 * Minimal print_hex_dump() replacement: dump "len" bytes of "buf" in
 * hex, "rowsize" bytes per line, each line starting with
 * "prefix_str".  "level", "prefix_type", "groupsize" and "ascii" are
 * accepted for Linux API compatibility but ignored.
 */
void
print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
    int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
{
	const uint8_t *cbuf = buf;
	size_t i;	/* size_t avoids the signed/unsigned compare with len */

	for (i = 0; i < len; i++) {
		if ((i % rowsize) == 0)
			printf("%s", prefix_str);
		printf("%02x", cbuf[i]);
		if ((i % rowsize) == (rowsize - 1))
			printf("\n");
		else
			printf(" ");
	}
	/* Terminate a partial final row so output doesn't run on. */
	if (len > 0 && (len % rowsize) != 0)
		printf("\n");
}
739
/*
 * Inverse of memchr(3): return a pointer to the first of "n" bytes
 * in "s" that differs from "c", or NULL when all bytes match.
 */
void *
memchr_inv(const void *s, int c, size_t n)
{
	const unsigned char *cp = s;
	const unsigned char want = (unsigned char)c;
	size_t i;

	for (i = 0; i < n; i++) {
		if (cp[i] != want)
			return ((void *)&cp[i]);
	}
	return (NULL);
}
753
/*
 * Comparison stub for the Linux rbtree shim; the shim keys nodes
 * itself, so this must never be reached.
 */
int
panic_cmp(struct rb_node *a, struct rb_node *b)
{
	panic(__func__);
}

/* Linux's RB_ROOT macro clashes with <sys/tree.h>; redefine it. */
#undef RB_ROOT
#define RB_ROOT(head) (head)->rbh_root

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
764
765 /*
766 * This is a fairly minimal implementation of the Linux "idr" API. It
767 * probably isn't very efficient, and definitely isn't RCU safe. The
768 * pre-load buffer is global instead of per-cpu; we rely on the kernel
769 * lock to make this work. We do randomize our IDs in order to make
770 * them harder to guess.
771 */
772
int idr_cmp(struct idr_entry *, struct idr_entry *);
SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);

struct pool idr_pool;			/* backing allocator for entries */
struct idr_entry *idr_entry_cache;	/* single-slot preload cache */

/* Start with an empty id tree. */
void
idr_init(struct idr *idr)
{
	SPLAY_INIT(&idr->tree);
}

/* Return every remaining entry to the pool. */
void
idr_destroy(struct idr *idr)
{
	struct idr_entry *id;

	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
		SPLAY_REMOVE(idr_tree, &idr->tree, id);
		pool_put(&idr_pool, id);
	}
}

/*
 * Pre-allocate one entry so a later GFP_NOWAIT idr_alloc() cannot
 * fail for lack of memory.  The cache is global, not per-cpu; the
 * kernel lock serializes access.
 */
void
idr_preload(unsigned int gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache == NULL)
		idr_entry_cache = pool_get(&idr_pool, flags);
}
806
/*
 * Allocate an unused id and bind it to "ptr".  The probe starts at
 * "start" and wraps at "end"; note the end value itself is tried
 * (inclusive), whereas the Linux API documents "end" as exclusive —
 * callers here appear to tolerate the difference.  Consumes the
 * idr_preload() cache entry if present.  Returns the new id,
 * -ENOMEM when no entry could be allocated, or -ENOSPC when every
 * id in the range is taken.
 */
int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	struct idr_entry *id;
	int begin;

	KERNEL_ASSERT_LOCKED();

	if (idr_entry_cache) {
		id = idr_entry_cache;
		idr_entry_cache = NULL;
	} else {
		id = pool_get(&idr_pool, flags);
		if (id == NULL)
			return -ENOMEM;
	}

	if (end <= 0)
		end = INT_MAX;

#ifdef notyet
	id->id = begin = start + arc4random_uniform(end - start);
#else
	id->id = begin = start;
#endif
	/* Linear probe with wrap-around; give up after one full lap. */
	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
		if (id->id == end)
			id->id = start;
		else
			id->id++;
		if (id->id == begin) {
			pool_put(&idr_pool, id);
			return -ENOSPC;
		}
	}
	id->ptr = ptr;
	return id->id;
}
846
847 void *
idr_replace(struct idr * idr,void * ptr,unsigned long id)848 idr_replace(struct idr *idr, void *ptr, unsigned long id)
849 {
850 struct idr_entry find, *res;
851 void *old;
852
853 find.id = id;
854 res = SPLAY_FIND(idr_tree, &idr->tree, &find);
855 if (res == NULL)
856 return ERR_PTR(-ENOENT);
857 old = res->ptr;
858 res->ptr = ptr;
859 return old;
860 }
861
862 void *
idr_remove(struct idr * idr,unsigned long id)863 idr_remove(struct idr *idr, unsigned long id)
864 {
865 struct idr_entry find, *res;
866 void *ptr = NULL;
867
868 find.id = id;
869 res = SPLAY_FIND(idr_tree, &idr->tree, &find);
870 if (res) {
871 SPLAY_REMOVE(idr_tree, &idr->tree, res);
872 ptr = res->ptr;
873 pool_put(&idr_pool, res);
874 }
875 return ptr;
876 }
877
878 void *
idr_find(struct idr * idr,unsigned long id)879 idr_find(struct idr *idr, unsigned long id)
880 {
881 struct idr_entry find, *res;
882
883 find.id = id;
884 res = SPLAY_FIND(idr_tree, &idr->tree, &find);
885 if (res == NULL)
886 return NULL;
887 return res->ptr;
888 }
889
890 void *
idr_get_next(struct idr * idr,int * id)891 idr_get_next(struct idr *idr, int *id)
892 {
893 struct idr_entry *res;
894
895 SPLAY_FOREACH(res, idr_tree, &idr->tree) {
896 if (res->id >= *id) {
897 *id = res->id;
898 return res->ptr;
899 }
900 }
901
902 return NULL;
903 }
904
905 int
idr_for_each(struct idr * idr,int (* func)(int,void *,void *),void * data)906 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
907 {
908 struct idr_entry *id;
909 int ret;
910
911 SPLAY_FOREACH(id, idr_tree, &idr->tree) {
912 ret = func(id->id, id->ptr, data);
913 if (ret)
914 return ret;
915 }
916
917 return 0;
918 }
919
/* Three-way ordering of idr entries by id, for the splay tree. */
int
idr_cmp(struct idr_entry *a, struct idr_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
927
/* An ida is an idr that only tracks ids (stores NULL pointers). */
void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
}

/* Allocate an unused id in [start, end]; see idr_alloc(). */
int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t gfp_mask)
{
	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
}

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

/* Allocate the lowest unused id >= min. */
int
ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
	return idr_alloc(&ida->idr, NULL, min, INT_MAX, gfp);
}

/*
 * NOTE(review): Linux treats "max" as inclusive; since idr_alloc()
 * above probes its end inclusively, passing max - 1 yields the range
 * [0, max - 1] — confirm the off-by-one is intentional.
 */
int
ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
	return idr_alloc(&ida->idr, NULL, 0, max - 1, gfp);
}

void
ida_free(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

/* Three-way ordering of xarray entries by index, for the splay tree. */
int
xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
{
	return (a->id < b->id ? -1 : a->id > b->id);
}

SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
struct pool xa_pool;
SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
980
/*
 * Initialize an xarray.  The shared backing pool is created on first
 * use; NOTE(review): the init-once latch is not locked — presumably
 * runs during single-threaded autoconf, confirm.  XA_FLAGS_LOCK_IRQ
 * selects an IPL_TTY mutex, everything else gets IPL_NONE.
 */
void
xa_init_flags(struct xarray *xa, gfp_t flags)
{
	static int initialized;

	if (!initialized) {
		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0,
		    "xapl", NULL);
		initialized = 1;
	}
	SPLAY_INIT(&xa->xa_tree);
	if (flags & XA_FLAGS_LOCK_IRQ)
		mtx_init(&xa->xa_lock, IPL_TTY);
	else
		mtx_init(&xa->xa_lock, IPL_NONE);
}

/* Return every remaining entry to the pool. */
void
xa_destroy(struct xarray *xa)
{
	struct xarray_entry *id;

	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
		pool_put(&xa_pool, id);
	}
}
1008
/* Don't wrap ids. */
/*
 * Allocate an unused index in [start, limit] (the limit is probed
 * inclusively) and store "entry" there; the chosen index is returned
 * through *id.  Called with xa_lock held; the lock is dropped around
 * a sleeping pool_get().  Returns 0, -ENOMEM, or -EBUSY when the
 * range is exhausted.
 */
int
__xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
{
	struct xarray_entry *xid;
	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
	int begin;

	if (gfp & GFP_NOWAIT) {
		xid = pool_get(&xa_pool, PR_NOWAIT);
	} else {
		/* pool_get() may sleep: temporarily drop the lock. */
		mtx_leave(&xa->xa_lock);
		xid = pool_get(&xa_pool, PR_WAITOK);
		mtx_enter(&xa->xa_lock);
	}

	if (xid == NULL)
		return -ENOMEM;

	if (limit <= 0)
		limit = INT_MAX;

	xid->id = begin = start;

	/* Linear probe with wrap-around; give up after one full lap. */
	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
		if (xid->id == limit)
			xid->id = start;
		else
			xid->id++;
		if (xid->id == begin) {
			pool_put(&xa_pool, xid);
			return -EBUSY;
		}
	}
	xid->ptr = entry;
	*id = xid->id;
	return 0;
}

/*
 * Wrap ids and store next id.
 * We walk the entire tree so don't special case wrapping.
 * The only caller of this (i915_drm_client.c) doesn't use next id.
 */
int
__xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, int limit, u32 *next,
    gfp_t gfp)
{
	int r = __xa_alloc(xa, id, entry, limit, gfp);
	*next = *id + 1;
	return r;
}
1061
1062 void *
__xa_erase(struct xarray * xa,unsigned long index)1063 __xa_erase(struct xarray *xa, unsigned long index)
1064 {
1065 struct xarray_entry find, *res;
1066 void *ptr = NULL;
1067
1068 find.id = index;
1069 res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1070 if (res) {
1071 SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
1072 ptr = res->ptr;
1073 pool_put(&xa_pool, res);
1074 }
1075 return ptr;
1076 }
1077
1078 void *
__xa_load(struct xarray * xa,unsigned long index)1079 __xa_load(struct xarray *xa, unsigned long index)
1080 {
1081 struct xarray_entry find, *res;
1082
1083 find.id = index;
1084 res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1085 if (res == NULL)
1086 return NULL;
1087 return res->ptr;
1088 }
1089
/*
 * Store "entry" at "index", returning the previous pointer (NULL
 * when the slot was empty).  Storing NULL erases the slot.  Returns
 * an XA_ERROR() on allocation or insert failure.  Called with
 * xa_lock held; the lock is dropped around a sleeping pool_get().
 */
void *
__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	struct xarray_entry find, *res;
	void *prev;

	if (entry == NULL)
		return __xa_erase(xa, index);

	find.id = index;
	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
	if (res != NULL) {
		/* index exists */
		/* XXX Multislot entries updates not implemented yet */
		prev = res->ptr;
		res->ptr = entry;
		return prev;
	}

	/* index not found, add new */
	if (gfp & GFP_NOWAIT) {
		res = pool_get(&xa_pool, PR_NOWAIT);
	} else {
		/* pool_get() may sleep: temporarily drop the lock. */
		mtx_leave(&xa->xa_lock);
		res = pool_get(&xa_pool, PR_WAITOK);
		mtx_enter(&xa->xa_lock);
	}
	if (res == NULL)
		return XA_ERROR(-ENOMEM);
	res->id = index;
	res->ptr = entry;
	if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL)
		return XA_ERROR(-EINVAL);
	return NULL; /* no prev entry at index */
}
1125
1126 void *
xa_get_next(struct xarray * xa,unsigned long * index)1127 xa_get_next(struct xarray *xa, unsigned long *index)
1128 {
1129 struct xarray_entry *res;
1130
1131 SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
1132 if (res->id >= *index) {
1133 *index = res->id;
1134 return res->ptr;
1135 }
1136 }
1137
1138 return NULL;
1139 }
1140
1141 int
sg_alloc_table(struct sg_table * table,unsigned int nents,gfp_t gfp_mask)1142 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
1143 {
1144 table->sgl = mallocarray(nents, sizeof(struct scatterlist),
1145 M_DRM, gfp_mask | M_ZERO);
1146 if (table->sgl == NULL)
1147 return -ENOMEM;
1148 table->nents = table->orig_nents = nents;
1149 sg_mark_end(&table->sgl[nents - 1]);
1150 return 0;
1151 }
1152
/* Free the scatterlist array allocated by sg_alloc_table(). */
void
sg_free_table(struct sg_table *table)
{
	free(table->sgl, M_DRM,
	    table->orig_nents * sizeof(struct scatterlist));
	table->orig_nents = 0;
	table->sgl = NULL;
}

/* Unimplemented stub: panics if ever reached. */
size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	panic("%s", __func__);
}
1168
/*
 * Execute an array of Linux i2c messages using iic(4).  All but the
 * final message (or final pair) are issued as individual transfers
 * without a STOP.  When exactly two messages remain, the first one
 * becomes the command phase of a combined transfer; the last message
 * is always sent with I2C_OP_*_WITH_STOP.  Returns the number of
 * messages handled so far, or a negated iic_exec() error.
 */
int
i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	void *cmd = NULL;
	int cmdlen = 0;
	int err, ret = 0;
	int op;

	iic_acquire_bus(&adap->ic, 0);

	while (num > 2) {
		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
		    msgs->buf, msgs->len, 0);
		if (err) {
			ret = -err;
			goto fail;
		}
		msgs++;
		num--;
		ret++;
	}

	if (num > 1) {
		/* second-to-last message supplies the command bytes */
		cmd = msgs->buf;
		cmdlen = msgs->len;
		msgs++;
		num--;
		ret++;
	}

	op = (msgs->flags & I2C_M_RD) ?
	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
	    msgs->buf, msgs->len, 0);
	if (err) {
		ret = -err;
		goto fail;
	}
	msgs++;
	ret++;

fail:
	iic_release_bus(&adap->ic, 0);

	return ret;
}
1216
1217 int
__i2c_transfer(struct i2c_adapter * adap,struct i2c_msg * msgs,int num)1218 __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1219 {
1220 int ret, retries;
1221
1222 retries = adap->retries;
1223 retry:
1224 if (adap->algo)
1225 ret = adap->algo->master_xfer(adap, msgs, num);
1226 else
1227 ret = i2c_master_xfer(adap, msgs, num);
1228 if (ret == -EAGAIN && retries > 0) {
1229 retries--;
1230 goto retry;
1231 }
1232
1233 return ret;
1234 }
1235
1236 int
i2c_transfer(struct i2c_adapter * adap,struct i2c_msg * msgs,int num)1237 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1238 {
1239 int ret;
1240
1241 if (adap->lock_ops)
1242 adap->lock_ops->lock_bus(adap, 0);
1243
1244 ret = __i2c_transfer(adap, msgs, num);
1245
1246 if (adap->lock_ops)
1247 adap->lock_ops->unlock_bus(adap, 0);
1248
1249 return ret;
1250 }
1251
1252 int
i2c_bb_master_xfer(struct i2c_adapter * adap,struct i2c_msg * msgs,int num)1253 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1254 {
1255 struct i2c_algo_bit_data *algo = adap->algo_data;
1256 struct i2c_adapter bb;
1257
1258 memset(&bb, 0, sizeof(bb));
1259 bb.ic = algo->ic;
1260 bb.retries = adap->retries;
1261 return i2c_master_xfer(&bb, msgs, num);
1262 }
1263
uint32_t
/* Advertise plain I2C plus emulated SMBus for the bit-bang adapter. */
i2c_bb_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
1269
/* Shared algorithm table installed by i2c_bit_add_bus(). */
struct i2c_algorithm i2c_bit_algo = {
	.master_xfer = i2c_bb_master_xfer,
	.functionality = i2c_bb_functionality
};
1274
1275 int
i2c_bit_add_bus(struct i2c_adapter * adap)1276 i2c_bit_add_bus(struct i2c_adapter *adap)
1277 {
1278 adap->algo = &i2c_bit_algo;
1279 adap->retries = 3;
1280
1281 return 0;
1282 }
1283
1284 #if defined(__amd64__) || defined(__i386__)
1285
1286 /*
1287 * This is a minimal implementation of the Linux vga_get/vga_put
1288 * interface. In all likelihood, it will only work for inteldrm(4) as
1289 * it assumes that if there is another active VGA device in the
1290 * system, it is sitting behind a PCI bridge.
1291 */
1292
extern int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);

/* Bridge whose VGA forwarding was disabled by vga_get_uninterruptible(). */
pcitag_t vga_bridge_tag;
int vga_bridge_disabled;
1298
int
/*
 * pci_enumerate_bus() callback: find the first PCI-PCI bridge in
 * domain 0 that forwards VGA cycles, clear its VGA-enable bit, and
 * remember it so vga_put() can restore it.  Returns 1 to stop the
 * enumeration once a bridge has been disabled.
 */
vga_disable_bridge(struct pci_attach_args *pa)
{
	pcireg_t bhlc, bc;

	if (pa->pa_domain != 0)
		return 0;

	/* Only PCI-PCI bridges (header type 1) have a bridge control reg. */
	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return 0;

	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
	if ((bc & PPB_BC_VGA_ENABLE) == 0)
		return 0;
	bc &= ~PPB_BC_VGA_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);

	vga_bridge_tag = pa->pa_tag;
	vga_bridge_disabled = 1;

	return 1;
}
1322
1323 void
vga_get_uninterruptible(struct pci_dev * pdev,int rsrc)1324 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1325 {
1326 if (pdev->pci->sc_bridgetag != NULL)
1327 return;
1328 pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1329 }
1330
1331 void
vga_put(struct pci_dev * pdev,int rsrc)1332 vga_put(struct pci_dev *pdev, int rsrc)
1333 {
1334 pcireg_t bc;
1335
1336 if (!vga_bridge_disabled)
1337 return;
1338
1339 bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1340 bc |= PPB_BC_VGA_ENABLE;
1341 pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1342
1343 vga_bridge_disabled = 0;
1344 }
1345
1346 #endif
1347
1348 /*
1349 * ACPI types and interfaces.
1350 */
1351
1352 #ifdef __HAVE_ACPI
1353 #include "acpi.h"
1354 #endif
1355
1356 #if NACPI > 0
1357
1358 #include <dev/acpi/acpireg.h>
1359 #include <dev/acpi/acpivar.h>
1360 #include <dev/acpi/amltypes.h>
1361 #include <dev/acpi/dsdt.h>
1362
acpi_status
/*
 * Look up an ACPI table by its 4-character signature.  Only the
 * first instance is supported.  Returns 0 and stores a pointer to
 * the (kernel-owned) table header on success, AE_NOT_FOUND otherwise.
 */
acpi_get_table(const char *sig, int instance,
    struct acpi_table_header **hdr)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpi_q *entry;

	KASSERT(instance == 1);

	if (sc == NULL)
		return AE_NOT_FOUND;

	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		/* Table signature is at the start of the header. */
		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
			*hdr = entry->q_table;
			return 0;
		}
	}

	return AE_NOT_FOUND;
}
1384
void
/* Tables handed out by acpi_get_table() are not refcounted: no-op. */
acpi_put_table(struct acpi_table_header *hdr)
{
}
1389
1390 acpi_status
acpi_get_handle(acpi_handle node,const char * name,acpi_handle * rnode)1391 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1392 {
1393 node = aml_searchname(node, name);
1394 if (node == NULL)
1395 return AE_NOT_FOUND;
1396
1397 *rnode = node;
1398 return 0;
1399 }
1400
acpi_status
/*
 * Copy a node's full AML path name into a caller-supplied buffer.
 * Only the ACPI_FULL_PATHNAME / fixed-size-buffer combination is
 * implemented.
 */
acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
{
	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
	KASSERT(type == ACPI_FULL_PATHNAME);
	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
	return 0;
}
1409
acpi_status
/*
 * Evaluate an AML method, translating between Linux acpi_object
 * argument/result conventions and OpenBSD aml_value.  Only integer
 * and buffer arguments, and a buffer return value, are supported.
 * On success the result buffer is malloc'd (M_DRM) and contains a
 * union acpi_object immediately followed by the payload bytes; the
 * caller owns and must free it.
 */
acpi_evaluate_object(acpi_handle node, const char *name,
    struct acpi_object_list *params, struct acpi_buffer *result)
{
	struct aml_value args[4], res;
	union acpi_object *obj;
	uint8_t *data;
	int i;

	KASSERT(params->count <= nitems(args));

	/* Translate each Linux-style argument to an aml_value. */
	for (i = 0; i < params->count; i++) {
		args[i].type = params->pointer[i].type;
		switch (args[i].type) {
		case AML_OBJTYPE_INTEGER:
			args[i].v_integer = params->pointer[i].integer.value;
			break;
		case AML_OBJTYPE_BUFFER:
			args[i].length = params->pointer[i].buffer.length;
			args[i].v_buffer = params->pointer[i].buffer.pointer;
			break;
		default:
			printf("%s: arg type 0x%02x", __func__, args[i].type);
			return AE_BAD_PARAMETER;
		}
	}

	if (name) {
		node = aml_searchname(node, name);
		if (node == NULL)
			return AE_NOT_FOUND;
	}
	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
		aml_freevalue(&res);
		return AE_ERROR;
	}

	/* Callers must request an allocated result buffer. */
	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);

	result->length = sizeof(union acpi_object);
	switch (res.type) {
	case AML_OBJTYPE_BUFFER:
		/* Object header and payload live in one allocation. */
		result->length += res.length;
		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
		obj = (union acpi_object *)result->pointer;
		data = (uint8_t *)(obj + 1);
		obj->type = res.type;
		obj->buffer.length = res.length;
		obj->buffer.pointer = data;
		memcpy(data, res.v_buffer, res.length);
		break;
	default:
		printf("%s: return type 0x%02x", __func__, res.type);
		aml_freevalue(&res);
		return AE_ERROR;
	}

	aml_freevalue(&res);
	return 0;
}
1470
/* Registered ACPI notifier callbacks (video hotkey events etc.). */
SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
    SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1473
1474 int
drm_linux_acpi_notify(struct aml_node * node,int notify,void * arg)1475 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1476 {
1477 struct acpi_bus_event event;
1478 struct notifier_block *nb;
1479
1480 event.device_class = ACPI_VIDEO_CLASS;
1481 event.type = notify;
1482
1483 SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1484 nb->notifier_call(nb, 0, &event);
1485 return 0;
1486 }
1487
int
/* Add a notifier block to the ACPI event list.  Never fails. */
register_acpi_notifier(struct notifier_block *nb)
{
	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
	return 0;
}
1494
1495 int
unregister_acpi_notifier(struct notifier_block * nb)1496 unregister_acpi_notifier(struct notifier_block *nb)
1497 {
1498 struct notifier_block *tmp;
1499
1500 SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1501 if (tmp == nb) {
1502 SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1503 notifier_block, link);
1504 return 0;
1505 }
1506 }
1507
1508 return -ENOENT;
1509 }
1510
1511 const char *
acpi_format_exception(acpi_status status)1512 acpi_format_exception(acpi_status status)
1513 {
1514 switch (status) {
1515 case AE_NOT_FOUND:
1516 return "not found";
1517 case AE_BAD_PARAMETER:
1518 return "bad parameter";
1519 default:
1520 return "unknown";
1521 }
1522 }
1523
int
/* Report the sleep state the system is transitioning to. */
acpi_target_system_state(void)
{
	return acpi_softc->sc_state;
}
1529
1530 #endif
1531
/* All registered backlight devices, searched by name. */
SLIST_HEAD(,backlight_device) backlight_device_list =
    SLIST_HEAD_INITIALIZER(backlight_device_list);
1534
void
/* Task callback: run the deferred status update for a device. */
backlight_do_update_status(void *arg)
{
	backlight_update_status(arg);
}
1540
1541 struct backlight_device *
backlight_device_register(const char * name,void * kdev,void * data,const struct backlight_ops * ops,const struct backlight_properties * props)1542 backlight_device_register(const char *name, void *kdev, void *data,
1543 const struct backlight_ops *ops, const struct backlight_properties *props)
1544 {
1545 struct backlight_device *bd;
1546
1547 bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1548 bd->ops = ops;
1549 bd->props = *props;
1550 bd->data = data;
1551
1552 task_set(&bd->task, backlight_do_update_status, bd);
1553
1554 SLIST_INSERT_HEAD(&backlight_device_list, bd, next);
1555 bd->name = name;
1556
1557 return bd;
1558 }
1559
void
/* Unlink and free a backlight device registered earlier. */
backlight_device_unregister(struct backlight_device *bd)
{
	SLIST_REMOVE(&backlight_device_list, bd, backlight_device, next);
	free(bd, M_DRM, sizeof(*bd));
}
1566
void
/* Defer the device's status update to the system task queue. */
backlight_schedule_update_status(struct backlight_device *bd)
{
	task_add(systq, &bd->task);
}
1572
int
/*
 * Unblank the backlight and push the new state to the driver.
 * A NULL device is tolerated and treated as success.
 */
backlight_enable(struct backlight_device *bd)
{
	if (bd == NULL)
		return 0;

	bd->props.power = FB_BLANK_UNBLANK;

	return bd->ops->update_status(bd);
}
1583
int
/*
 * Power the backlight down and push the new state to the driver.
 * A NULL device is tolerated and treated as success.
 */
backlight_disable(struct backlight_device *bd)
{
	if (bd == NULL)
		return 0;

	bd->props.power = FB_BLANK_POWERDOWN;

	return bd->ops->update_status(bd);
}
1594
1595 struct backlight_device *
backlight_device_get_by_name(const char * name)1596 backlight_device_get_by_name(const char *name)
1597 {
1598 struct backlight_device *bd;
1599
1600 SLIST_FOREACH(bd, &backlight_device_list, next) {
1601 if (strcmp(name, bd->name) == 0)
1602 return bd;
1603 }
1604
1605 return NULL;
1606 }
1607
/* Per-device driver data association, emulating dev_{set,get}_drvdata. */
struct drvdata {
	struct device *dev;	/* key: the device */
	void *data;		/* value: opaque driver data */
	SLIST_ENTRY(drvdata) next;
};

SLIST_HEAD(,drvdata) drvdata_list = SLIST_HEAD_INITIALIZER(drvdata_list);
1615
void
/*
 * Associate opaque driver data with a device.  An existing entry is
 * updated in place (a NULL data value is stored but the entry itself
 * is kept); a new entry is only created for non-NULL data.
 */
dev_set_drvdata(struct device *dev, void *data)
{
	struct drvdata *drvdata;

	SLIST_FOREACH(drvdata, &drvdata_list, next) {
		if (drvdata->dev == dev) {
			drvdata->data = data;
			return;
		}
	}

	/* Don't allocate an entry just to record NULL. */
	if (data == NULL)
		return;

	drvdata = malloc(sizeof(*drvdata), M_DRM, M_WAITOK);
	drvdata->dev = dev;
	drvdata->data = data;

	SLIST_INSERT_HEAD(&drvdata_list, drvdata, next);
}
1637
1638 void *
dev_get_drvdata(struct device * dev)1639 dev_get_drvdata(struct device *dev)
1640 {
1641 struct drvdata *drvdata;
1642
1643 SLIST_FOREACH(drvdata, &drvdata_list, next) {
1644 if (drvdata->dev == dev)
1645 return drvdata->data;
1646 }
1647
1648 return NULL;
1649 }
1650
void
/* Notify kevent listeners of a hotplug change on the device node. */
drm_sysfs_hotplug_event(struct drm_device *dev)
{
	knote_locked(&dev->note, NOTE_CHANGE);
}
1656
void
/* Per-connector hotplug: forwarded as a device-wide kevent. */
drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
{
	knote_locked(&connector->dev->note, NOTE_CHANGE);
}
1662
void
/* Not implemented on OpenBSD; logs a stub message. */
drm_sysfs_connector_status_event(struct drm_connector *connector,
    struct drm_property *property)
{
	STUB();
}
1669
void
/* Not implemented on OpenBSD; logs a stub message. */
drm_sysfs_connector_property_event(struct drm_connector *connector,
    struct drm_property *property)
{
	STUB();
}
1676
struct dma_fence *
/* Take a reference on a fence; NULL is passed through unchanged. */
dma_fence_get(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}
1684
struct dma_fence *
/*
 * RCU-flavoured reference grab.  There is no RCU here, so this is
 * identical to dma_fence_get().
 */
dma_fence_get_rcu(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}
1692
1693 struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence ** dfp)1694 dma_fence_get_rcu_safe(struct dma_fence **dfp)
1695 {
1696 struct dma_fence *fence;
1697 if (dfp == NULL)
1698 return NULL;
1699 fence = *dfp;
1700 if (fence)
1701 kref_get(&fence->refcount);
1702 return fence;
1703 }
1704
void
/*
 * kref release callback: hand destruction to the fence's release
 * hook, or free it directly (size 0: allocation size unknown here).
 */
dma_fence_release(struct kref *ref)
{
	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
	if (fence->ops && fence->ops->release)
		fence->ops->release(fence);
	else
		free(fence, M_DRM, 0);
}
1714
void
/* Drop a fence reference; the last put triggers dma_fence_release(). */
dma_fence_put(struct dma_fence *fence)
{
	if (fence)
		kref_put(&fence->refcount, dma_fence_release);
}
1721
int
/*
 * Core signalling path; caller holds fence->lock.  Marks the fence
 * signalled exactly once (-EINVAL if it already was), records the
 * timestamp, then runs every pending callback with the list already
 * detached so callbacks may re-add themselves safely.
 */
dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	if (fence == NULL)
		return -EINVAL;

	/* test_and_set makes signalling idempotent under the lock. */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Detach the callback list before invoking anything. */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}
1746
1747 int
dma_fence_signal(struct dma_fence * fence)1748 dma_fence_signal(struct dma_fence *fence)
1749 {
1750 int r;
1751
1752 if (fence == NULL)
1753 return -EINVAL;
1754
1755 mtx_enter(fence->lock);
1756 r = dma_fence_signal_timestamp_locked(fence, ktime_get());
1757 mtx_leave(fence->lock);
1758
1759 return r;
1760 }
1761
int
/* Signal a fence with the current time; caller holds fence->lock. */
dma_fence_signal_locked(struct dma_fence *fence)
{
	if (fence == NULL)
		return -EINVAL;

	return dma_fence_signal_timestamp_locked(fence, ktime_get());
}
1770
1771 int
dma_fence_signal_timestamp(struct dma_fence * fence,ktime_t timestamp)1772 dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
1773 {
1774 int r;
1775
1776 if (fence == NULL)
1777 return -EINVAL;
1778
1779 mtx_enter(fence->lock);
1780 r = dma_fence_signal_timestamp_locked(fence, timestamp);
1781 mtx_leave(fence->lock);
1782
1783 return r;
1784 }
1785
1786 bool
dma_fence_is_signaled(struct dma_fence * fence)1787 dma_fence_is_signaled(struct dma_fence *fence)
1788 {
1789 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1790 return true;
1791
1792 if (fence->ops->signaled && fence->ops->signaled(fence)) {
1793 dma_fence_signal(fence);
1794 return true;
1795 }
1796
1797 return false;
1798 }
1799
1800 bool
dma_fence_is_signaled_locked(struct dma_fence * fence)1801 dma_fence_is_signaled_locked(struct dma_fence *fence)
1802 {
1803 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1804 return true;
1805
1806 if (fence->ops->signaled && fence->ops->signaled(fence)) {
1807 dma_fence_signal_locked(fence);
1808 return true;
1809 }
1810
1811 return false;
1812 }
1813
ktime_t
/*
 * Return the signalling timestamp of a signalled fence, or the
 * current time for one still pending.  The busy-wait covers the
 * window in dma_fence_signal_timestamp_locked() between setting the
 * SIGNALED bit and publishing the timestamp.
 */
dma_fence_timestamp(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
			CPU_BUSY_CYCLE();
		return fence->timestamp;
	} else {
		return ktime_get();
	}
}
1825
1826 long
dma_fence_wait_timeout(struct dma_fence * fence,bool intr,long timeout)1827 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
1828 {
1829 if (timeout < 0)
1830 return -EINVAL;
1831
1832 if (fence->ops->wait)
1833 return fence->ops->wait(fence, intr, timeout);
1834 else
1835 return dma_fence_default_wait(fence, intr, timeout);
1836 }
1837
1838 long
dma_fence_wait(struct dma_fence * fence,bool intr)1839 dma_fence_wait(struct dma_fence *fence, bool intr)
1840 {
1841 long ret;
1842
1843 ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
1844 if (ret < 0)
1845 return ret;
1846
1847 return 0;
1848 }
1849
void
/*
 * Ask the driver to start delivering the signal for this fence.
 * Runs enable_signaling() at most once; if the hook reports the
 * fence already completed, signal it immediately under the lock.
 */
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    fence->ops->enable_signaling) {
		mtx_enter(fence->lock);
		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);
		mtx_leave(fence->lock);
	}
}
1862
void
/*
 * Initialize a fence with one reference, no flags, no error, and an
 * empty callback list.  "lock" must outlive the fence and protects
 * its callback list and signalling state.
 */
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    struct mutex *lock, uint64_t context, uint64_t seqno)
{
	fence->ops = ops;
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0;
	fence->error = 0;
	kref_init(&fence->refcount);
	INIT_LIST_HEAD(&fence->cb_list);
}
1876
int
/*
 * Queue "func" to run when the fence signals.  Returns -ENOENT
 * (with cb->node initialized so removal is safe) when the fence is
 * already signalled, including the case where enable_signaling()
 * reports completion.  Enables signalling on first use.
 */
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
    dma_fence_func_t func)
{
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	/* Fast path: already signalled, no lock needed. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	mtx_enter(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	/* Re-check under the lock; the fence may have signalled meanwhile. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	mtx_leave(fence->lock);

	return ret;
}
1914
1915 bool
dma_fence_remove_callback(struct dma_fence * fence,struct dma_fence_cb * cb)1916 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
1917 {
1918 bool ret;
1919
1920 mtx_enter(fence->lock);
1921
1922 ret = !list_empty(&cb->node);
1923 if (ret)
1924 list_del_init(&cb->node);
1925
1926 mtx_leave(fence->lock);
1927
1928 return ret;
1929 }
1930
/* Monotonic source of fence context ids; 0 is never handed out. */
static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);

uint64_t
/* Reserve "num" consecutive context ids and return the first. */
dma_fence_context_alloc(unsigned int num)
{
	return atomic64_add_return(num, &drm_fence_context_count) - num;
}
1938
/* Callback record used by the default wait paths to wake a sleeper. */
struct default_wait_cb {
	struct dma_fence_cb base;
	struct proc *proc;	/* the process sleeping on the fence */
};
1943
static void
/* Fence callback: wake the process recorded in the wait record. */
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
	    container_of(cb, struct default_wait_cb, base);
	wake_up_process(wait->proc);
}
1951
long
/*
 * Generic fence wait: enable signalling, register a wake-up
 * callback, and sleep on the fence lock until signalled or the
 * jiffies timeout runs out.  Returns remaining jiffies (>= 1) on
 * success, 0 on timeout, -ERESTARTSYS if interrupted.  A timeout of
 * 0 is a non-blocking poll.
 */
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	long ret = timeout ? timeout : 1;
	unsigned long end;
	int err;
	struct default_wait_cb cb;
	bool was_set;

	KASSERT(timeout <= INT_MAX);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	mtx_enter(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	/* Re-check under the lock before arming the callback. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set && fence->ops->enable_signaling) {
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	/* Non-blocking poll: not signalled, report timeout. */
	if (timeout == 0) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.proc = curproc;
	list_add(&cb.base.node, &fence->cb_list);

	end = jiffies + timeout;
	/* msleep() drops and retakes fence->lock around each sleep. */
	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			break;
		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
		    "dmafence", ret);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
out:
	mtx_leave(fence->lock);

	return ret;
}
2009
2010 static bool
dma_fence_test_signaled_any(struct dma_fence ** fences,uint32_t count,uint32_t * idx)2011 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
2012 uint32_t *idx)
2013 {
2014 int i;
2015
2016 for (i = 0; i < count; ++i) {
2017 struct dma_fence *fence = fences[i];
2018 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
2019 if (idx)
2020 *idx = i;
2021 return true;
2022 }
2023 }
2024 return false;
2025 }
2026
long
/*
 * Wait until any one of "count" fences signals, or the jiffies
 * timeout expires.  Returns remaining jiffies on success (with the
 * winning index in *idx when non-NULL), 0 on timeout, -ERESTARTSYS
 * if interrupted, -ENOMEM if the callback array can't be allocated.
 * A timeout of 0 is a non-blocking poll across all fences.
 */
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
    bool intr, long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	long ret = timeout;
	unsigned long end;
	int i, err;

	KASSERT(timeout <= INT_MAX);

	if (timeout == 0) {
		for (i = 0; i < count; i++) {
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}
		}
		return 0;
	}

	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
	if (cb == NULL)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct dma_fence *fence = fences[i];
		cb[i].proc = curproc;
		if (dma_fence_add_callback(fence, &cb[i].base,
		    dma_fence_default_wait_cb)) {
			/* Fence i already signalled: success, clean up. */
			if (idx)
				*idx = i;
			goto cb_cleanup;
		}
	}

	end = jiffies + timeout;
	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
		if (dma_fence_test_signaled_any(fences, count, idx))
			break;
		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
		if (err == EINTR || err == ERESTART) {
			ret = -ERESTARTSYS;
			break;
		}
	}

cb_cleanup:
	/* Remove only the callbacks that were successfully added (< i). */
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);
	free(cb, M_DRM, count * sizeof(*cb));
	return ret;
}
2081
2082 void
dma_fence_set_deadline(struct dma_fence * f,ktime_t t)2083 dma_fence_set_deadline(struct dma_fence *f, ktime_t t)
2084 {
2085 if (f->ops->set_deadline == NULL)
2086 return;
2087 if (dma_fence_is_signaled(f) == false)
2088 f->ops->set_deadline(f, t);
2089 }
2090
/* Singleton always-signalled fence; lazily set up by dma_fence_get_stub(). */
static struct dma_fence dma_fence_stub;
static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
2093
static const char *
/* Driver and timeline name for stub fences. */
dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}
2099
/* Minimal ops table for stub fences: names only, default everything else. */
static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};
2104
struct dma_fence *
/*
 * Return a reference to the shared always-signalled stub fence,
 * initializing and signalling it on first use (ops == NULL doubles
 * as the "not yet initialized" marker).
 */
dma_fence_get_stub(void)
{
	mtx_enter(&dma_fence_stub_mtx);
	if (dma_fence_stub.ops == NULL) {
		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
		    &dma_fence_stub_mtx, 0, 0);
		dma_fence_signal_locked(&dma_fence_stub);
	}
	mtx_leave(&dma_fence_stub_mtx);

	return dma_fence_get(&dma_fence_stub);
}
2118
2119 struct dma_fence *
dma_fence_allocate_private_stub(ktime_t ts)2120 dma_fence_allocate_private_stub(ktime_t ts)
2121 {
2122 struct dma_fence *f = malloc(sizeof(*f), M_DRM,
2123 M_ZERO | M_WAITOK | M_CANFAIL);
2124 if (f == NULL)
2125 return NULL;
2126 dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
2127 dma_fence_signal_timestamp(f, ts);
2128 return f;
2129 }
2130
static const char *
/* Driver name reported by fence arrays. */
dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}
2136
static const char *
/* Fence arrays aggregate many timelines, so report "unbound". */
dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}
2142
static void
/*
 * Timeout callback: signal the array fence once its pending count
 * hits zero, dropping the reference held for the timeout.
 */
irq_dma_fence_array_work(void *arg)
{
	struct dma_fence_array *dfa = (struct dma_fence_array *)arg;
	dma_fence_signal(&dfa->base);
	dma_fence_put(&dfa->base);
}
2150
static void
/*
 * Per-member fence callback: when the last pending member signals,
 * defer the array's own signalling to a timeout (its reference is
 * then consumed by irq_dma_fence_array_work); otherwise just drop
 * the per-member reference.
 */
dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
	    container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *dfa = array_cb->array;

	if (atomic_dec_and_test(&dfa->num_pending))
		timeout_add(&dfa->to, 1);
	else
		dma_fence_put(&dfa->base);
}
2163
static bool
/*
 * Hook a callback onto every member fence, taking one array
 * reference per member.  Members that are already signalled fail
 * dma_fence_add_callback() and are accounted immediately; if that
 * drains num_pending, report false so the core signals the array.
 */
dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);
	/* Callback slots are allocated directly after the array struct. */
	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
	int i;

	for (i = 0; i < dfa->num_fences; ++i) {
		cb[i].array = dfa;
		dma_fence_get(&dfa->base);
		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
		    dma_fence_array_cb_func)) {
			dma_fence_put(&dfa->base);
			if (atomic_dec_and_test(&dfa->num_pending))
				return false;
		}
	}

	return true;
}
2184
static bool
/* The array is complete once its pending count has been drained. */
dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);

	return atomic_read(&dfa->num_pending) <= 0;
}
2192
static void
/*
 * Release hook: drop the reference on every member fence, free the
 * member array (ownership was transferred at create time), then the
 * array fence itself.
 */
dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *dfa = to_dma_fence_array(fence);
	int i;

	for (i = 0; i < dfa->num_fences; ++i)
		dma_fence_put(dfa->fences[i]);

	free(dfa->fences, M_DRM, 0);
	dma_fence_free(fence);
}
2205
struct dma_fence_array *
/*
 * Build an aggregate fence over "fences" (ownership of the array and
 * its references transfers to the result).  With signal_on_any the
 * aggregate signals when the first member does, otherwise when all
 * do.  Returns NULL on allocation failure; callback slots are
 * co-allocated after the struct for enable_signaling().
 */
dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
    unsigned seqno, bool signal_on_any)
{
	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
	    (num_fences * sizeof(struct dma_fence_array_cb)),
	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
	if (dfa == NULL)
		return NULL;

	mtx_init(&dfa->lock, IPL_TTY);
	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
	    context, seqno);
	timeout_set(&dfa->to, irq_dma_fence_array_work, dfa);

	dfa->num_fences = num_fences;
	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
	dfa->fences = fences;

	return dfa;
}
2227
2228 struct dma_fence *
dma_fence_array_first(struct dma_fence * f)2229 dma_fence_array_first(struct dma_fence *f)
2230 {
2231 struct dma_fence_array *dfa;
2232
2233 if (f == NULL)
2234 return NULL;
2235
2236 if ((dfa = to_dma_fence_array(f)) == NULL)
2237 return f;
2238
2239 if (dfa->num_fences > 0)
2240 return dfa->fences[0];
2241
2242 return NULL;
2243 }
2244
2245 struct dma_fence *
dma_fence_array_next(struct dma_fence * f,unsigned int i)2246 dma_fence_array_next(struct dma_fence *f, unsigned int i)
2247 {
2248 struct dma_fence_array *dfa;
2249
2250 if (f == NULL)
2251 return NULL;
2252
2253 if ((dfa = to_dma_fence_array(f)) == NULL)
2254 return NULL;
2255
2256 if (i < dfa->num_fences)
2257 return dfa->fences[i];
2258
2259 return NULL;
2260 }
2261
/* Ops table for aggregate fences; also used by to_dma_fence_array(). */
const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};
2269
int
/*
 * Advance *df along a fence chain to the first link whose sequence
 * number satisfies "seqno".  A seqno of 0 means "any" and succeeds
 * immediately; -EINVAL if *df is not a chain or its head seqno is
 * already below the request.  Walk references are balanced by the
 * chain iterator; the head reference taken here is dropped at exit.
 */
dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
{
	struct dma_fence_chain *chain;
	struct dma_fence *fence;

	if (seqno == 0)
		return 0;

	if ((chain = to_dma_fence_chain(*df)) == NULL)
		return -EINVAL;

	fence = &chain->base;
	if (fence->seqno < seqno)
		return -EINVAL;

	dma_fence_chain_for_each(*df, fence) {
		if ((*df)->context != fence->context)
			break;

		chain = to_dma_fence_chain(*df);
		if (chain->prev_seqno < seqno)
			break;
	}
	dma_fence_put(fence);

	return 0;
}
2298
void
/*
 * Link "fence" into a chain after "prev" (both references are
 * consumed).  If prev is itself a chain link with a monotonically
 * increasing seqno, the new link continues prev's timeline
 * (context); otherwise a fresh context is allocated and the seqno
 * falls back to prev's so ordering stays consistent.
 */
dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
    struct dma_fence *fence, uint64_t seqno)
{
	uint64_t context;

	chain->fence = fence;
	chain->prev = prev;
	mtx_init(&chain->lock, IPL_TTY);

	/* if prev is a chain */
	if (to_dma_fence_chain(prev) != NULL) {
		if (__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
			chain->prev_seqno = prev->seqno;
			context = prev->context;
		} else {
			chain->prev_seqno = 0;
			context = dma_fence_context_alloc(1);
			/* Non-monotonic request: inherit prev's seqno. */
			seqno = prev->seqno;
		}
	} else {
		chain->prev_seqno = 0;
		context = dma_fence_context_alloc(1);
	}

	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
	    context, seqno);
}
2327
static const char *
/* Driver name reported by chain fences. */
dma_fence_chain_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_chain";
}
2333
static const char *
/* Chains span多 timelines, so report "unbound". */
dma_fence_chain_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}
2339
2340 static bool dma_fence_chain_enable_signaling(struct dma_fence *);
2341
static void
/*
 * Timeout callback: try to re-arm signalling on the next unsignalled
 * link; when none remains, signal the chain fence.  Drops the
 * reference taken when the timeout was scheduled.
 */
dma_fence_chain_timo(void *arg)
{
	struct dma_fence_chain *chain = (struct dma_fence_chain *)arg;

	if (dma_fence_chain_enable_signaling(&chain->base) == false)
		dma_fence_signal(&chain->base);
	dma_fence_put(&chain->base);
}
2351
static void
/*
 * Per-link fence callback: defer further processing to a timeout
 * (fence callbacks run with the signalling lock held) and drop the
 * reference on the link's backing fence.
 */
dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_chain *chain =
	    container_of(cb, struct dma_fence_chain, cb);
	timeout_set(&chain->to, dma_fence_chain_timo, chain);
	timeout_add(&chain->to, 1);
	dma_fence_put(f);
}
2361
static bool
/*
 * Walk the chain looking for the first link whose backing fence is
 * still pending and hook the chain's callback onto it.  Returns true
 * once a callback is armed; false when every link has signalled (the
 * caller then signals the chain itself).  References taken on the
 * head, the iterator, and the backing fence are balanced on every
 * path.
 */
dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_chain *chain, *h;
	struct dma_fence *f;

	h = to_dma_fence_chain(fence);
	dma_fence_get(&h->base);
	dma_fence_chain_for_each(fence, &h->base) {
		chain = to_dma_fence_chain(fence);
		if (chain == NULL)
			f = fence;
		else
			f = chain->fence;

		dma_fence_get(f);
		if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) {
			dma_fence_put(fence);
			return true;
		}
		/* Already signalled: drop and keep walking. */
		dma_fence_put(f);
	}
	dma_fence_put(&h->base);
	return false;
}
2387
/*
 * A chain fence counts as signaled only when every fence contained in
 * every link of the chain has signaled.  Stops early (dropping the
 * iterator's reference) on the first unsignaled fence.
 */
static bool
dma_fence_chain_signaled(struct dma_fence *fence)
{
	struct dma_fence_chain *chain;
	struct dma_fence *f;

	dma_fence_chain_for_each(fence, fence) {
		chain = to_dma_fence_chain(fence);
		if (chain == NULL)
			f = fence;
		else
			f = chain->fence;

		if (dma_fence_is_signaled(f) == false) {
			dma_fence_put(fence);
			return false;
		}
	}
	return true;
}
2408
/*
 * Release callback for a chain fence.  Instead of letting each link's
 * release recurse into its predecessor (which could overflow the stack
 * on a long chain), iteratively detach and drop predecessor links for
 * as long as we hold their only reference.  A predecessor with other
 * holders, or a non-chain predecessor, ends the loop and gets a plain
 * dma_fence_put.
 */
static void
dma_fence_chain_release(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
	struct dma_fence_chain *prev_chain;
	struct dma_fence *prev;

	for (prev = chain->prev; prev != NULL; prev = chain->prev) {
		/* someone else still holds it; normal put below */
		if (kref_read(&prev->refcount) > 1)
			break;
		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
			break;
		/* splice prev out of the chain before dropping it */
		chain->prev = prev_chain->prev;
		prev_chain->prev = NULL;
		dma_fence_put(prev);
	}
	dma_fence_put(prev);
	dma_fence_put(chain->fence);
	dma_fence_free(fence);
}
2429
/*
 * Advance one step along a fence chain, garbage-collecting links whose
 * fences have already signaled.  Consumes the caller's reference on
 * fence and returns a referenced predecessor, or NULL at the end of
 * the chain (or if fence is not a chain at all).  Signaled links are
 * unlinked with a compare-and-swap so concurrent walkers are safe; on
 * CAS success the chain's ref moves to new_prev, otherwise our extra
 * new_prev ref is dropped.
 */
struct dma_fence *
dma_fence_chain_walk(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain;
	struct dma_fence *prev, *new_prev, *tmp;

	if (chain == NULL) {
		dma_fence_put(fence);
		return NULL;
	}

	while ((prev = dma_fence_get(chain->prev)) != NULL) {
		prev_chain = to_dma_fence_chain(prev);
		if (prev_chain != NULL) {
			if (!dma_fence_is_signaled(prev_chain->fence))
				break;
			new_prev = dma_fence_get(prev_chain->prev);
		} else {
			if (!dma_fence_is_signaled(prev))
				break;
			new_prev = NULL;
		}
		tmp = atomic_cas_ptr(&chain->prev, prev, new_prev);
		dma_fence_put(tmp == prev ? prev : new_prev);
		dma_fence_put(prev);
	}

	dma_fence_put(fence);
	return prev;
}
2460
/* Fence ops vector for chain fences; chains always use 64-bit seqnos. */
const struct dma_fence_ops dma_fence_chain_ops = {
	.get_driver_name = dma_fence_chain_get_driver_name,
	.get_timeline_name = dma_fence_chain_get_timeline_name,
	.enable_signaling = dma_fence_chain_enable_signaling,
	.signaled = dma_fence_chain_signaled,
	.release = dma_fence_chain_release,
	.use_64bit_seqno = true,
};
2469
2470 bool
dma_fence_is_container(struct dma_fence * fence)2471 dma_fence_is_container(struct dma_fence *fence)
2472 {
2473 return (fence->ops == &dma_fence_chain_ops) ||
2474 (fence->ops == &dma_fence_array_ops);
2475 }
2476
/* dma-buf fds do not support read(2). */
int
dmabuf_read(struct file *fp, struct uio *uio, int fflags)
{
	return ENXIO;
}
2482
/* dma-buf fds do not support write(2). */
int
dmabuf_write(struct file *fp, struct uio *uio, int fflags)
{
	return ENXIO;
}
2488
2489 int
dmabuf_ioctl(struct file * fp,u_long com,caddr_t data,struct proc * p)2490 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2491 {
2492 return (ENOTTY);
2493 }
2494
/* dma-buf fds cannot be polled via kqueue. */
int
dmabuf_kqfilter(struct file *fp, struct knote *kn)
{
	return EINVAL;
}
2500
2501 int
dmabuf_stat(struct file * fp,struct stat * st,struct proc * p)2502 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
2503 {
2504 struct dma_buf *dmabuf = fp->f_data;
2505
2506 memset(st, 0, sizeof(*st));
2507 st->st_size = dmabuf->size;
2508 st->st_mode = S_IFIFO; /* XXX */
2509 return (0);
2510 }
2511
/*
 * Last close of a dma-buf fd: invoke the exporter's release hook
 * (under the kernel lock, since exporters expect Linux-style
 * serialization) and free the wrapper.
 */
int
dmabuf_close(struct file *fp, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;

	fp->f_data = NULL;
	KERNEL_LOCK();
	dmabuf->ops->release(dmabuf);
	KERNEL_UNLOCK();
	free(dmabuf, M_DRM, sizeof(struct dma_buf));
	return (0);
}

/*
 * lseek(2) on a dma-buf fd.  Like Linux, only SEEK_SET/SEEK_END with
 * a zero offset are allowed, which lets userland discover the buffer
 * size via lseek(fd, 0, SEEK_END).
 */
int
dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
	struct dma_buf *dmabuf = fp->f_data;
	off_t newoff;

	if (*offset != 0)
		return (EINVAL);

	switch (whence) {
	case SEEK_SET:
		newoff = 0;
		break;
	case SEEK_END:
		newoff = dmabuf->size;
		break;
	default:
		return (EINVAL);
	}
	/* f_offset is protected by the file's mutex */
	mtx_enter(&fp->f_mtx);
	fp->f_offset = newoff;
	mtx_leave(&fp->f_mtx);
	*offset = newoff;
	return (0);
}
2550
/* fileops vector backing DTYPE_DMABUF file descriptors. */
const struct fileops dmabufops = {
	.fo_read	= dmabuf_read,
	.fo_write	= dmabuf_write,
	.fo_ioctl	= dmabuf_ioctl,
	.fo_kqfilter	= dmabuf_kqfilter,
	.fo_stat	= dmabuf_stat,
	.fo_close	= dmabuf_close,
	.fo_seek	= dmabuf_seek,
};
2560
/*
 * Create a dma-buf object for an exporter.  Allocates a struct file of
 * type DTYPE_DMABUF (no fd yet — dma_buf_fd() installs one later) and
 * copies the exporter's ops/priv/size out of the export info.
 * Returns ERR_PTR(-ENFILE) if no file can be allocated.
 */
struct dma_buf *
dma_buf_export(const struct dma_buf_export_info *info)
{
	struct proc *p = curproc;
	struct dma_buf *dmabuf;
	struct file *fp;

	fp = fnew(p);
	if (fp == NULL)
		return ERR_PTR(-ENFILE);
	fp->f_type = DTYPE_DMABUF;
	fp->f_ops = &dmabufops;
	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
	dmabuf->priv = info->priv;
	dmabuf->ops = info->ops;
	dmabuf->size = info->size;
	dmabuf->file = fp;
	fp->f_data = dmabuf;
	INIT_LIST_HEAD(&dmabuf->attachments);
	return dmabuf;
}
2582
/*
 * Look up the dma-buf behind an fd.  On success the file reference
 * taken by fd_getfile() is kept and must be released with
 * dma_buf_put().  Returns ERR_PTR(-EBADF) for a bad fd and
 * ERR_PTR(-EINVAL) if the fd is not a dma-buf.
 */
struct dma_buf *
dma_buf_get(int fd)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return ERR_PTR(-EBADF);

	if (fp->f_type != DTYPE_DMABUF) {
		FRELE(fp, p);
		return ERR_PTR(-EINVAL);
	}

	return fp->f_data;
}

/* Drop the file reference taken by dma_buf_get()/get_dma_buf(). */
void
dma_buf_put(struct dma_buf *dmabuf)
{
	KASSERT(dmabuf);
	KASSERT(dmabuf->file);

	FRELE(dmabuf->file, curproc);
}
2609
/*
 * Install the dma-buf's file in the current process's descriptor
 * table.  Honors O_CLOEXEC; retries allocation after growing the table
 * on ENOSPC.  Returns the new fd, or a negative errno on failure
 * (Linux convention, hence the -error).
 */
int
dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp = dmabuf->file;
	int fd, cloexec, error;

	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;

	fdplock(fdp);
restart:
	if ((error = fdalloc(p, 0, &fd)) != 0) {
		if (error == ENOSPC) {
			fdexpand(p);
			goto restart;
		}
		fdpunlock(fdp);
		return -error;
	}

	fdinsert(fdp, fd, cloexec, fp);
	fdpunlock(fdp);

	return fd;
}

/* Take an additional reference on the dma-buf's backing file. */
void
get_dma_buf(struct dma_buf *dmabuf)
{
	FREF(dmabuf->file);
}
2636
void
get_dma_buf(struct dma_buf *dmabuf)
{
	/* extra hold on the backing file; paired with dma_buf_put() */
	FREF(dmabuf->file);
}
2642
/*
 * Report the highest link speed a PCIe device claims to support.
 * Prefers the Link Capabilities 2 supported-speeds vector (present
 * from PCIe capability version 2 on); falls back to the max-link-speed
 * field of Link Capabilities for older devices.
 */
enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int pos ;
	pcireg_t xcap, lnkcap = 0, lnkcap2 = 0;
	pcireg_t id;
	enum pci_bus_speed cap = PCI_SPEED_UNKNOWN;
	int bus, device, function;

	if (pdev == NULL)
		return PCI_SPEED_UNKNOWN;

	pc = pdev->pc;
	tag = pdev->tag;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCI_SPEED_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/*
	 * VIA and ServerWorks (RCC) bridges are reported to misbehave
	 * here, so don't probe their speed caps at all.
	 */
	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
		return PCI_SPEED_UNKNOWN;

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);

	/* max link speed field / supported link speeds vector */
	lnkcap &= 0x0f;
	lnkcap2 &= 0xfe;

	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & 0x02)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap2 & 0x04)
			cap = PCIE_SPEED_5_0GT;
		if (lnkcap2 & 0x08)
			cap = PCIE_SPEED_8_0GT;
		if (lnkcap2 & 0x10)
			cap = PCIE_SPEED_16_0GT;
		if (lnkcap2 & 0x20)
			cap = PCIE_SPEED_32_0GT;
		if (lnkcap2 & 0x40)
			cap = PCIE_SPEED_64_0GT;
	} else {
		if (lnkcap & 0x01)
			cap = PCIE_SPEED_2_5GT;
		if (lnkcap & 0x02)
			cap = PCIE_SPEED_5_0GT;
	}

	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
	    lnkcap2);
	return cap;
}
2705
/*
 * Report the maximum link width (number of lanes) from the device's
 * PCIe Link Capabilities register, bits 9:4.
 */
enum pcie_link_width
pcie_get_width_cap(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc = pdev->pc;
	pcitag_t tag = pdev->tag;
	int pos ;
	pcireg_t lnkcap = 0;
	pcireg_t id;
	int bus, device, function;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return PCIE_LNK_WIDTH_UNKNOWN;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	pci_decompose_tag(pc, tag, &bus, &device, &function);

	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);

	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);

	/* maximum link width lives in bits 9:4 */
	if (lnkcap)
		return (lnkcap & 0x3f0) >> 4;
	return PCIE_LNK_WIDTH_UNKNOWN;
}
2732
/*
 * True if either ASPM L0s or L1 is currently enabled in the device's
 * Link Control register; false if the device is not PCIe.
 */
bool
pcie_aspm_enabled(struct pci_dev *pdev)
{
	pci_chipset_tag_t pc = pdev->pc;
	pcitag_t tag = pdev->tag;
	int pos ;
	pcireg_t lcsr;

	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
	    &pos, NULL))
		return false;

	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
		return true;

	return false;
}
2751
/* Shared wait queues for the Linux wait-on-bit emulation. */
static wait_queue_head_t bit_waitq;
wait_queue_head_t var_waitq;
/* serializes the test_bit/sleep sequence against wake_up_bit() */
struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);

/*
 * Sleep until the given bit in *word is cleared.  The sleep channel is
 * the word itself; wake_up_bit() does the matching wakeup.  Returns 0
 * once the bit is clear, 1 if the (possibly interruptible, per mode)
 * sleep was broken.
 */
int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	int err;

	/* fast path: bit already clear, no need to take the mutex */
	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
		    INFSLP);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}
2776
/*
 * Like wait_on_bit(), but each sleep is bounded by timo ticks.
 * Returns 0 once the bit is clear, 1 on timeout or interrupted sleep.
 */
int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
{
	int err;

	if (!test_bit(bit, word))
		return 0;

	mtx_enter(&wait_bit_mtx);
	while (test_bit(bit, word)) {
		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
		if (err) {
			mtx_leave(&wait_bit_mtx);
			return 1;
		}
	}
	mtx_leave(&wait_bit_mtx);
	return 0;
}
2796
/*
 * Wake all sleepers in wait_on_bit()/wait_on_bit_timeout() for this
 * word.  The mutex ensures the wakeup cannot race between a waiter's
 * test_bit and its msleep.
 */
void
wake_up_bit(void *word, int bit)
{
	mtx_enter(&wait_bit_mtx);
	wakeup(word);
	mtx_leave(&wait_bit_mtx);
}

/* Clear the bit, then wake anyone waiting on it. */
void
clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit(bit, word);
	wake_up_bit(word, bit);
}

/* Single shared wait queue stands in for Linux's hashed bit waitqueues. */
wait_queue_head_t *
bit_waitqueue(void *word, int bit)
{
	/* XXX hash table of wait queues? */
	return &bit_waitq;
}

wait_queue_head_t *
__var_waitqueue(void *p)
{
	/* XXX hash table of wait queues? */
	return &bit_waitq;
}
2825
/* Taskq-backed stand-ins for Linux's global workqueues. */
struct workqueue_struct *system_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_long_wq;
struct taskq *taskletq;

/*
 * One-time setup for the Linux emulation layer: create the system
 * workqueues (as 4-thread taskqs) and the tasklet taskq, initialize
 * the wait-bit queues, the idr pool, and the kmap_atomic VA window.
 */
void
drm_linux_init(void)
{
	system_wq = (struct workqueue_struct *)
	    taskq_create("drmwq", 4, IPL_HIGH, 0);
	system_highpri_wq = (struct workqueue_struct *)
	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
	system_unbound_wq = (struct workqueue_struct *)
	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
	system_long_wq = (struct workqueue_struct *)
	    taskq_create("drmlwq", 4, IPL_HIGH, 0);

	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);

	init_waitqueue_head(&bit_waitq);
	init_waitqueue_head(&var_waitq);

	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
	    "idrpl", NULL);

	kmap_atomic_va =
	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
}

/* Tear down everything drm_linux_init() created, in reverse order. */
void
drm_linux_exit(void)
{
	pool_destroy(&idr_pool);

	taskq_destroy(taskletq);

	taskq_destroy((struct taskq *)system_long_wq);
	taskq_destroy((struct taskq *)system_unbound_wq);
	taskq_destroy((struct taskq *)system_highpri_wq);
	taskq_destroy((struct taskq *)system_wq);
}
2868
/* PCIe Resizable BAR extended capability: registers and fields. */
#define PCIE_ECAP_RESIZE_BAR	0x15
#define RBCAP0			0x04
#define RBCTRL0			0x08
#define RBCTRL_BARINDEX_MASK	0x07
#define RBCTRL_BARSIZE_MASK	0x1f00
#define RBCTRL_BARSIZE_SHIFT	8

/* size in MB is 1 << nsize */
int
pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
{
	pcireg_t reg;
	uint32_t offset, capid;

	/* only resizing BAR 0 is implemented */
	KASSERT(bar == 0);

	offset = PCI_PCIE_ECAP;

	/* search PCI Express Extended Capabilities */
	do {
		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
		capid = PCI_PCIE_ECAP_ID(reg);
		if (capid == PCIE_ECAP_RESIZE_BAR)
			break;
		offset = PCI_PCIE_ECAP_NEXT(reg);
	} while (capid != 0);

	if (capid == 0) {
		printf("%s: could not find resize bar cap!\n", __func__);
		return -ENOTSUP;
	}

	/* capability register: bit (nsize + 4) set => size supported */
	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);

	if ((reg & (1 << (nsize + 4))) == 0) {
		printf("%s size not supported\n", __func__);
		return -ENOTSUP;
	}

	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
		printf("%s BAR index not 0\n", __func__);
		return -EINVAL;
	}

	reg &= ~RBCTRL_BARSIZE_MASK;
	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;

	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);

	return 0;
}
2921
/* All registered memory shrinkers, walked by drmbackoff(). */
TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);

/* Add a shrinker to the global list (the name format is ignored). */
int
register_shrinker(struct shrinker *shrinker, const char *format, ...)
{
	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
	return 0;
}

void
unregister_shrinker(struct shrinker *shrinker)
{
	TAILQ_REMOVE(&shrinkers, shrinker, next);
}

/*
 * Memory pressure hook: ask each registered shrinker to scan until
 * roughly npages pages have been reclaimed or all shrinkers have been
 * consulted once.
 */
void
drmbackoff(long npages)
{
	struct shrink_control sc;
	struct shrinker *shrinker;
	u_long ret;

	shrinker = TAILQ_FIRST(&shrinkers);
	while (shrinker && npages > 0) {
		sc.nr_to_scan = npages;
		ret = shrinker->scan_objects(shrinker, &sc);
		npages -= ret;
		shrinker = TAILQ_NEXT(shrinker, next);
	}
}
2952
/* Allocate a zeroed bitmap large enough for n bits. */
void *
bitmap_zalloc(u_int n, gfp_t flags)
{
	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
}

void
bitmap_free(void *p)
{
	kfree(p);
}
2964
/*
 * Decrement *v; if it reaches zero, return 1 with the lock held so the
 * caller can tear down the protected object.  Otherwise return 0 with
 * the lock released.  The lock-free fast path handles values > 1.
 */
int
atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
{
	/* fast path: decrement without the lock unless we'd hit zero */
	if (atomic_add_unless(v, -1, 1))
		return 0;

	rw_enter_write(lock);
	if (atomic_dec_return(v) == 0)
		return 1;	/* caller now owns lock */
	rw_exit_write(lock);
	return 0;
}
2977
/*
 * Linux printk emulation.  KERN_* levels arrive as a "\001<digit>"
 * prefix; unless DRMDEBUG is set, messages at KERN_INFO and below
 * (numerically higher levels) are suppressed.  The prefix is stripped
 * before handing the format to vprintf.
 */
int
printk(const char *fmt, ...)
{
	int ret, level;
	va_list ap;

	if (fmt != NULL && *fmt == '\001') {
		level = fmt[1];
#ifndef DRMDEBUG
		if (level >= KERN_INFO[1] && level <= '9')
			return 0;
#endif
		fmt += 2;
	}

	va_start(ap, fmt);
	ret = vprintf(fmt, ap);
	va_end(ap);

	return ret;
}
2999
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

/*
 * Linear-scan stand-in for Linux's augmented interval tree: return the
 * first node whose [start, last] range overlaps [start, last], or NULL.
 * O(n) over the whole tree rather than O(log n).
 */
struct interval_tree_node *
interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
    unsigned long last)
{
	struct interval_tree_node *node;
	struct rb_node *rb;

	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
		node = rb_entry(rb, typeof(*node), rb);
		if (LAST(node) >= start && START(node) <= last)
			return node;
	}
	return NULL;
}

void
interval_tree_remove(struct interval_tree_node *node,
    struct rb_root_cached *root)
{
	rb_erase_cached(&node->rb, root);
}
3024
/*
 * Insert a node into the tree ordered by interval start.  Unlike the
 * Linux original the tree is not augmented with subtree-max values,
 * which is why interval_tree_iter_first() scans linearly.
 */
void
interval_tree_insert(struct interval_tree_node *node,
    struct rb_root_cached *root)
{
	struct rb_node **iter = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct interval_tree_node *iter_node;

	while (*iter) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct interval_tree_node, rb);

		if (node->start < iter_node->start)
			iter = &(*iter)->rb_left;
		else
			iter = &(*iter)->rb_right;
	}

	rb_link_node(&node->rb, parent, iter);
	rb_insert_color_cached(&node->rb, root, false);
}
3046
/* Sync-file fds cannot be read. */
int
syncfile_read(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}
3052
/* Sync-file fds cannot be written. */
int
syncfile_write(struct file *fp, struct uio *uio, int fflags)
{
	return (ENXIO);
}
3058
3059 int
syncfile_ioctl(struct file * fp,u_long com,caddr_t data,struct proc * p)3060 syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
3061 {
3062 return ENOTTY;
3063 }
3064
/* Sync-file fds cannot be kqueue-polled. */
int
syncfile_kqfilter(struct file *fp, struct knote *kn)
{
	return (EINVAL);
}
3070
3071 int
syncfile_stat(struct file * fp,struct stat * st,struct proc * p)3072 syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
3073 {
3074 memset(st, 0, sizeof(*st));
3075 st->st_mode = S_IFIFO; /* XXX */
3076 return 0;
3077 }
3078
/*
 * Last close of a sync-file fd: drop the reference on the wrapped
 * fence and free the sync_file.
 */
int
syncfile_close(struct file *fp, struct proc *p)
{
	struct sync_file *sf = fp->f_data;

	dma_fence_put(sf->fence);
	fp->f_data = NULL;
	free(sf, M_DRM, sizeof(struct sync_file));
	return 0;
}

/*
 * lseek(2) on a sync-file fd.  Only a zero offset with SEEK_SET or
 * SEEK_END is accepted; the logical size is always zero.
 */
int
syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
{
	off_t newoff;

	if (*offset != 0)
		return EINVAL;

	switch (whence) {
	case SEEK_SET:
		newoff = 0;
		break;
	case SEEK_END:
		newoff = 0;
		break;
	default:
		return EINVAL;
	}
	/* f_offset is protected by the file's mutex */
	mtx_enter(&fp->f_mtx);
	fp->f_offset = newoff;
	mtx_leave(&fp->f_mtx);
	*offset = newoff;
	return 0;
}
3114
/* fileops vector backing DTYPE_SYNC file descriptors. */
const struct fileops syncfileops = {
	.fo_read	= syncfile_read,
	.fo_write	= syncfile_write,
	.fo_ioctl	= syncfile_ioctl,
	.fo_kqfilter	= syncfile_kqfilter,
	.fo_stat	= syncfile_stat,
	.fo_close	= syncfile_close,
	.fo_seek	= syncfile_seek,
};
3124
/*
 * Linux fd_install() emulation, restricted to sync files (the only
 * callers in this tree).  Installs fp at the fd previously reserved by
 * get_unused_fd_flags(), always close-on-exec.
 */
void
fd_install(int fd, struct file *fp)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;

	if (fp->f_type != DTYPE_SYNC)
		return;

	fdplock(fdp);
	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
	fdinsert(fdp, fd, UF_EXCLOSE, fp);
	fdpunlock(fdp);
}

/* Linux fput() emulation; only sync files are handled here. */
void
fput(struct file *fp)
{
	if (fp->f_type != DTYPE_SYNC)
		return;

	FRELE(fp, curproc);
}
3148
/*
 * Reserve an fd in the current process without installing a file yet
 * (paired with fd_install()).  Only O_CLOEXEC callers are supported.
 * Grows the descriptor table on ENOSPC; returns -1 on other failures.
 */
int
get_unused_fd_flags(unsigned int flags)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	int error, fd;

	KASSERT((flags & O_CLOEXEC) != 0);

	fdplock(fdp);
retryalloc:
	if ((error = fdalloc(p, 0, &fd)) != 0) {
		if (error == ENOSPC) {
			fdexpand(p);
			goto retryalloc;
		}
		fdpunlock(fdp);
		return -1;
	}
	fdpunlock(fdp);

	return fd;
}

/* Release an fd reserved by get_unused_fd_flags() but never installed. */
void
put_unused_fd(int fd)
{
	struct filedesc *fdp = curproc->p_fd;

	fdplock(fdp);
	fdremove(fdp, fd);
	fdpunlock(fdp);
}
3182
/*
 * Return a referenced copy of the fence behind a sync-file fd, or NULL
 * if the fd is invalid or not a sync file.  The temporary file
 * reference from fd_getfile() is dropped before returning.
 */
struct dma_fence *
sync_file_get_fence(int fd)
{
	struct proc *p = curproc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct sync_file *sf;
	struct dma_fence *f;

	if ((fp = fd_getfile(fdp, fd)) == NULL)
		return NULL;

	if (fp->f_type != DTYPE_SYNC) {
		FRELE(fp, p);
		return NULL;
	}
	sf = fp->f_data;
	f = dma_fence_get(sf->fence);
	FRELE(sf->file, p);
	return f;
}

/*
 * Wrap a fence in a new sync_file backed by a DTYPE_SYNC struct file.
 * Takes its own reference on the fence.  No fd is created here; the
 * caller pairs this with get_unused_fd_flags()/fd_install().  Returns
 * NULL if no file can be allocated.
 */
struct sync_file *
sync_file_create(struct dma_fence *fence)
{
	struct proc *p = curproc;
	struct sync_file *sf;
	struct file *fp;

	fp = fnew(p);
	if (fp == NULL)
		return NULL;
	fp->f_type = DTYPE_SYNC;
	fp->f_ops = &syncfileops;
	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
	sf->file = fp;
	sf->fence = dma_fence_get(fence);
	fp->f_data = sf;
	return sf;
}
3223
/* This platform never restricts DRM to firmware-provided drivers. */
bool
drm_firmware_drivers_only(void)
{
	return false;
}
3229
3230
/* Unimplemented Linux memremap(); logs via STUB() and returns NULL. */
void *
memremap(phys_addr_t phys_addr, size_t size, int flags)
{
	STUB();
	return NULL;
}

/* Counterpart stub for memremap(). */
void
memunmap(void *addr)
{
	STUB();
}
3243
3244 #include <linux/platform_device.h>
3245
/*
 * Map a generic struct device back to a bus_dma_tag_t: search the drm
 * driver instances for a matching device, otherwise assume dev is
 * really a platform_device and use its tag.
 */
bus_dma_tag_t
dma_tag_lookup(struct device *dev)
{
	extern struct cfdriver drm_cd;
	struct drm_device *drm;
	int i;

	for (i = 0; i < drm_cd.cd_ndevs; i++) {
		drm = drm_cd.cd_devs[i];
		if (drm && drm->dev == dev)
			return drm->dmat;
	}

	return ((struct platform_device *)dev)->dmat;
}

/* Tracks live dma_alloc_coherent() allocations for dma_free_coherent(). */
LIST_HEAD(, drm_dmamem) dmamem_list = LIST_HEAD_INITIALIZER(dmamem_list);
3263
/*
 * Linux dma_alloc_coherent() emulation: allocate a single contiguous
 * DMA-coherent segment, record it on dmamem_list so the free side can
 * find it by KVA, and return the KVA with the bus address in
 * *dma_handle.  Returns NULL on failure.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    int gfp)
{
	bus_dma_tag_t dmat = dma_tag_lookup(dev);
	struct drm_dmamem *mem;

	mem = drm_dmamem_alloc(dmat, size, PAGE_SIZE, 1, size,
	    BUS_DMA_COHERENT, 0);
	if (mem == NULL)
		return NULL;
	*dma_handle = mem->map->dm_segs[0].ds_addr;
	LIST_INSERT_HEAD(&dmamem_list, mem, next);
	return mem->kva;
}
3279
/*
 * Free memory from dma_alloc_coherent().  The allocation is found by
 * its KVA on dmamem_list; size and bus address are sanity-checked
 * against what was recorded at allocation time.
 */
void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
    dma_addr_t dma_handle)
{
	bus_dma_tag_t dmat = dma_tag_lookup(dev);
	struct drm_dmamem *mem;

	LIST_FOREACH(mem, &dmamem_list, next) {
		if (mem->kva == cpu_addr)
			break;
	}
	KASSERT(mem);
	KASSERT(mem->size == size);
	KASSERT(mem->map->dm_segs[0].ds_addr == dma_handle);

	LIST_REMOVE(mem, next);
	drm_dmamem_free(dmat, mem);
}
3298
/*
 * Build a one-entry sg_table describing a coherent allocation.  Only
 * works for physically contiguous buffers: the physical address of
 * the first page stands in for the whole range.
 */
int
dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
    dma_addr_t dma_addr, size_t size)
{
	paddr_t pa;
	int ret;

	if (!pmap_extract(pmap_kernel(), (vaddr_t)cpu_addr, &pa))
		return -EINVAL;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		return ret;

	sg_set_page(sgt->sgl, PHYS_TO_VM_PAGE(pa), size, 0);
	return 0;
}
3316
/*
 * Map an MMIO/physical resource for DMA.  Creates a single-segment
 * dmamap, loads the raw physical range into it, and returns the
 * resulting bus address, or DMA_MAPPING_ERROR on failure.
 * NOTE(review): the dmamap handle is not returned to the caller, so
 * there is no matching unmap path here.
 */
dma_addr_t
dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
    enum dma_data_direction dir, u_long attr)
{
	bus_dma_tag_t dmat= dma_tag_lookup(dev);
	bus_dmamap_t map;
	bus_dma_segment_t seg;

	if (bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &map))
		return DMA_MAPPING_ERROR;
	seg.ds_addr = phys_addr;
	seg.ds_len = size;
	if (bus_dmamap_load_raw(dmat, map, &seg, 1, size, BUS_DMA_WAITOK)) {
		bus_dmamap_destroy(dmat, map);
		return DMA_MAPPING_ERROR;
	}

	return map->dm_segs[0].ds_addr;
}
3337
3338 #ifdef BUS_DMA_FIXED
3339
3340 #include <linux/iommu.h>
3341
/*
 * Map a single-entry sg_table at a fixed IOVA.  A dmamap is created on
 * the sg_table, its first segment forced to the requested iova, and
 * the backing page loaded with BUS_DMA_FIXED.  Returns the mapped
 * length or -ENOMEM.
 * NOTE(review): on a load failure the created dmamap is not destroyed
 * before returning — possible leak; confirm against callers.
 */
size_t
iommu_map_sgtable(struct iommu_domain *domain, u_long iova,
    struct sg_table *sgt, int prot)
{
	bus_dma_segment_t seg;
	int error;

	error = bus_dmamap_create(domain->dmat, sgt->sgl->length, 1,
	    sgt->sgl->length, 0, BUS_DMA_WAITOK, &sgt->dmamap);
	if (error)
		return -ENOMEM;

	sgt->dmamap->dm_segs[0].ds_addr = iova;
	sgt->dmamap->dm_segs[0].ds_len = sgt->sgl->length;
	sgt->dmamap->dm_nsegs = 1;
	seg.ds_addr = VM_PAGE_TO_PHYS(sgt->sgl->__page);
	seg.ds_len = sgt->sgl->length;
	error = bus_dmamap_load_raw(domain->dmat, sgt->dmamap, &seg, 1,
	    sgt->sgl->length, BUS_DMA_WAITOK | BUS_DMA_FIXED);
	if (error)
		return -ENOMEM;

	return sg_dma_len(sgt->sgl);
}
3366
/* Unimplemented IOMMU unmap; logs via STUB() and reports 0 unmapped. */
size_t
iommu_unmap(struct iommu_domain *domain, u_long iova, size_t size)
{
	STUB();
	return 0;
}

/* Unimplemented; no per-device domain lookup exists. */
struct iommu_domain *
iommu_get_domain_for_dev(struct device *dev)
{
	STUB();
	return NULL;
}

/* Unimplemented IOVA-to-physical translation. */
phys_addr_t
iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	STUB();
	return 0;
}
3387
/* Allocate an empty domain; its DMA tag is set by iommu_attach_device(). */
struct iommu_domain *
iommu_domain_alloc(struct bus_type *type)
{
	return malloc(sizeof(struct iommu_domain), M_DEVBUF, M_WAITOK | M_ZERO);
}

/*
 * Bind a domain to a device by adopting the platform device's DMA tag;
 * dev is assumed to be a platform_device here.
 */
int
iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct platform_device *pdev = (struct platform_device *)dev;

	domain->dmat = pdev->dmat;
	return 0;
}
3402
3403 #endif
3404
3405 #include <linux/component.h>
3406
/*
 * One registered component: the providing device (dev), the aggregate
 * device it was matched to (adev, set by
 * component_master_add_with_match), and its bind/unbind ops.
 */
struct component {
	struct device *dev;
	struct device *adev;
	const struct component_ops *ops;
	SLIST_ENTRY(component) next;
};

SLIST_HEAD(,component) component_list = SLIST_HEAD_INITIALIZER(component_list);
3415
/* Register a component device; binding happens later via the master. */
int
component_add(struct device *dev, const struct component_ops *ops)
{
	struct component *component;

	component = malloc(sizeof(*component), M_DEVBUF, M_WAITOK | M_ZERO);
	component->dev = dev;
	component->ops = ops;
	SLIST_INSERT_HEAD(&component_list, component, next);
	return 0;
}

/* The type argument is ignored in this emulation. */
int
component_add_typed(struct device *dev, const struct component_ops *ops,
    int type)
{
	return component_add(dev, ops);
}
3434
/*
 * Bind every component previously matched to the aggregate device dev,
 * passing data to each bind op.  Stops at the first bind failure and
 * returns its error; 0 otherwise.
 */
int
component_bind_all(struct device *dev, void *data)
{
	struct component *component;
	int ret = 0;

	SLIST_FOREACH(component, &component_list, next) {
		if (component->adev == dev) {
			ret = component->ops->bind(component->dev, NULL, data);
			if (ret)
				break;
		}
	}

	return ret;
}
3451
/* A single compare-function/cookie pair used to match components. */
struct component_match_entry {
	int (*compare)(struct device *, void *);
	void *data;
};

/* Fixed-size match list (at most 4 entries in this emulation). */
struct component_match {
	struct component_match_entry match[4];
	int nmatches;
};
3461
/*
 * Register an aggregate driver: tag every registered component that
 * satisfies one of the match entries with this aggregate device, then,
 * if anything matched, invoke the master's bind op immediately (there
 * is no deferred probing here).
 */
int
component_master_add_with_match(struct device *dev,
    const struct component_master_ops *ops, struct component_match *match)
{
	struct component *component;
	int found = 0;
	int i, ret;

	SLIST_FOREACH(component, &component_list, next) {
		for (i = 0; i < match->nmatches; i++) {
			struct component_match_entry *m = &match->match[i];
			if (m->compare(component->dev, m->data)) {
				component->adev = dev;
				found = 1;
				break;
			}
		}
	}

	if (found) {
		ret = ops->bind(dev);
		if (ret)
			return ret;
	}

	return 0;
}
3489
3490 #ifdef __HAVE_FDT
3491
3492 #include <linux/platform_device.h>
3493 #include <dev/ofw/openfirm.h>
3494 #include <dev/ofw/fdt.h>
3495 #include <machine/fdt.h>
3496
3497 LIST_HEAD(, platform_device) pdev_list = LIST_HEAD_INITIALIZER(pdev_list);
3498
/*
 * Populate a platform_device from its FDT attach args (faa): build the
 * resource array from the reg properties and copy over the node, bus
 * space and DMA tags, then link it on pdev_list.
 */
void
platform_device_register(struct platform_device *pdev)
{
	int i;

	pdev->num_resources = pdev->faa->fa_nreg;
	if (pdev->faa->fa_nreg > 0) {
		pdev->resource = mallocarray(pdev->faa->fa_nreg,
		    sizeof(*pdev->resource), M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < pdev->faa->fa_nreg; i++) {
			pdev->resource[i].start = pdev->faa->fa_reg[i].addr;
			pdev->resource[i].end = pdev->faa->fa_reg[i].addr +
			    pdev->faa->fa_reg[i].size - 1;
		}
	}

	pdev->parent = pdev->dev.dv_parent;
	pdev->node = pdev->faa->fa_node;
	pdev->iot = pdev->faa->fa_iot;
	pdev->dmat = pdev->faa->fa_dmat;
	LIST_INSERT_HEAD(&pdev_list, pdev, next);
}
3521
3522
/* Return the num'th resource; the type argument is ignored here. */
struct resource *
platform_get_resource(struct platform_device *pdev, u_int type, u_int num)
{
	KASSERT(num < pdev->num_resources);
	return &pdev->resource[num];
}

/*
 * Map the register window named in the "reg-names" FDT property and
 * return its KVA, or an ERR_PTR on lookup/map failure.  The mapping is
 * never unmapped ("devm" lifetime is the device's lifetime).
 */
void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
    const char *name)
{
	bus_space_handle_t ioh;
	int err, idx;

	idx = OF_getindex(pdev->node, name, "reg-names");
	if (idx == -1 || idx >= pdev->num_resources)
		return ERR_PTR(-EINVAL);

	err = bus_space_map(pdev->iot, pdev->resource[idx].start,
	    pdev->resource[idx].end - pdev->resource[idx].start + 1,
	    BUS_SPACE_MAP_LINEAR, &ioh);
	if (err)
		return ERR_PTR(-err);

	return bus_space_vaddr(pdev->iot, ioh);
}
3549
3550 #include <dev/ofw/ofw_clock.h>
3551 #include <linux/clk.h>
3552
3553 struct clk *
devm_clk_get(struct device * dev,const char * name)3554 devm_clk_get(struct device *dev, const char *name)
3555 {
3556 struct platform_device *pdev = (struct platform_device *)dev;
3557 struct clk *clk;
3558
3559 clk = malloc(sizeof(*clk), M_DEVBUF, M_WAITOK);
3560 clk->freq = clock_get_frequency(pdev->node, name);
3561 return clk;
3562 }
3563
3564 u_long
clk_get_rate(struct clk * clk)3565 clk_get_rate(struct clk *clk)
3566 {
3567 return clk->freq;
3568 }
3569
3570 #include <linux/gpio/consumer.h>
3571 #include <dev/ofw/ofw_gpio.h>
3572
/*
 * Minimal gpio_desc: the raw cells of one FDT "<name>-gpios"
 * reference (controller phandle plus controller-specific cells),
 * as consumed by gpio_controller_*().
 * NOTE(review): assumes a reference never needs more than 4 cells
 * -- confirm against the gpio controllers in use.
 */
struct gpio_desc {
	uint32_t gpios[4];
};
3576
3577 struct gpio_desc *
devm_gpiod_get_optional(struct device * dev,const char * name,int flags)3578 devm_gpiod_get_optional(struct device *dev, const char *name, int flags)
3579 {
3580 struct platform_device *pdev = (struct platform_device *)dev;
3581 struct gpio_desc *desc;
3582 char fullname[128];
3583 int len;
3584
3585 snprintf(fullname, sizeof(fullname), "%s-gpios", name);
3586
3587 desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);
3588 len = OF_getpropintarray(pdev->node, fullname, desc->gpios,
3589 sizeof(desc->gpios));
3590 KASSERT(len <= sizeof(desc->gpios));
3591 if (len < 0) {
3592 free(desc, M_DEVBUF, sizeof(*desc));
3593 return NULL;
3594 }
3595
3596 switch (flags) {
3597 case GPIOD_IN:
3598 gpio_controller_config_pin(desc->gpios, GPIO_CONFIG_INPUT);
3599 break;
3600 case GPIOD_OUT_HIGH:
3601 gpio_controller_config_pin(desc->gpios, GPIO_CONFIG_OUTPUT);
3602 gpio_controller_set_pin(desc->gpios, 1);
3603 break;
3604 default:
3605 panic("%s: unimplemented flags 0x%x", __func__, flags);
3606 }
3607
3608 return desc;
3609 }
3610
3611 int
gpiod_get_value_cansleep(const struct gpio_desc * desc)3612 gpiod_get_value_cansleep(const struct gpio_desc *desc)
3613 {
3614 return gpio_controller_get_pin(((struct gpio_desc *)desc)->gpios);
3615 }
3616
/*
 * Minimal phy handle: remembers which FDT node and "phy-names"
 * entry it was looked up from.
 */
struct phy {
	int node;		/* FDT node of the device using the phy */
	const char *name;	/* entry matched in "phy-names" */
};
3621
3622 struct phy *
devm_phy_optional_get(struct device * dev,const char * name)3623 devm_phy_optional_get(struct device *dev, const char *name)
3624 {
3625 struct platform_device *pdev = (struct platform_device *)dev;
3626 struct phy *phy;
3627 int idx;
3628
3629 idx = OF_getindex(pdev->node, name, "phy-names");
3630 if (idx == -1)
3631 return NULL;
3632
3633 phy = malloc(sizeof(*phy), M_DEVBUF, M_WAITOK);
3634 phy->node = pdev->node;
3635 phy->name = name;
3636
3637 return phy;
3638 }
3639
3640 struct bus_type platform_bus_type;
3641
3642 #include <dev/ofw/ofw_misc.h>
3643
3644 #include <linux/of.h>
3645 #include <linux/platform_device.h>
3646
3647 struct device_node *
__of_devnode(void * arg)3648 __of_devnode(void *arg)
3649 {
3650 struct device *dev = container_of(arg, struct device, of_node);
3651 struct platform_device *pdev = (struct platform_device *)dev;
3652
3653 return (struct device_node *)(uintptr_t)pdev->node;
3654 }
3655
3656 int
__of_device_is_compatible(struct device_node * np,const char * compatible)3657 __of_device_is_compatible(struct device_node *np, const char *compatible)
3658 {
3659 return OF_is_compatible((uintptr_t)np, compatible);
3660 }
3661
3662 int
__of_property_present(struct device_node * np,const char * propname)3663 __of_property_present(struct device_node *np, const char *propname)
3664 {
3665 return OF_getpropbool((uintptr_t)np, (char *)propname);
3666 }
3667
3668 int
__of_property_read_variable_u32_array(struct device_node * np,const char * propname,uint32_t * out_values,size_t sz_min,size_t sz_max)3669 __of_property_read_variable_u32_array(struct device_node *np,
3670 const char *propname, uint32_t *out_values, size_t sz_min, size_t sz_max)
3671 {
3672 int len;
3673
3674 len = OF_getpropintarray((uintptr_t)np, (char *)propname, out_values,
3675 sz_max * sizeof(*out_values));
3676 if (len < 0)
3677 return -EINVAL;
3678 if (len == 0)
3679 return -ENODATA;
3680 if (len < sz_min * sizeof(*out_values) ||
3681 len > sz_max * sizeof(*out_values))
3682 return -EOVERFLOW;
3683 if (sz_min == 1 && sz_max == 1)
3684 return 0;
3685 return len / sizeof(*out_values);
3686 }
3687
3688 int
__of_property_read_variable_u64_array(struct device_node * np,const char * propname,uint64_t * out_values,size_t sz_min,size_t sz_max)3689 __of_property_read_variable_u64_array(struct device_node *np,
3690 const char *propname, uint64_t *out_values, size_t sz_min, size_t sz_max)
3691 {
3692 int len;
3693
3694 len = OF_getpropint64array((uintptr_t)np, (char *)propname, out_values,
3695 sz_max * sizeof(*out_values));
3696 if (len < 0)
3697 return -EINVAL;
3698 if (len == 0)
3699 return -ENODATA;
3700 if (len < sz_min * sizeof(*out_values) ||
3701 len > sz_max * sizeof(*out_values))
3702 return -EOVERFLOW;
3703 if (sz_min == 1 && sz_max == 1)
3704 return 0;
3705 return len / sizeof(*out_values);
3706 }
3707
3708 int
__of_property_match_string(struct device_node * np,const char * propname,const char * str)3709 __of_property_match_string(struct device_node *np,
3710 const char *propname, const char *str)
3711 {
3712 int idx;
3713
3714 idx = OF_getindex((uintptr_t)np, str, propname);
3715 if (idx == -1)
3716 return -ENODATA;
3717 return idx;
3718 }
3719
3720 struct device_node *
__of_parse_phandle(struct device_node * np,const char * propname,int idx)3721 __of_parse_phandle(struct device_node *np, const char *propname, int idx)
3722 {
3723 uint32_t phandles[16] = {};
3724 int len, node;
3725
3726 len = OF_getpropintarray((uintptr_t)np, (char *)propname, phandles,
3727 sizeof(phandles));
3728 if (len < (idx + 1) * sizeof(uint32_t))
3729 return NULL;
3730
3731 node = OF_getnodebyphandle(phandles[idx]);
3732 if (node == 0)
3733 return NULL;
3734
3735 return (struct device_node *)(uintptr_t)node;
3736 }
3737
3738 int
__of_parse_phandle_with_args(struct device_node * np,const char * propname,const char * cellsname,int idx,struct of_phandle_args * args)3739 __of_parse_phandle_with_args(struct device_node *np, const char *propname,
3740 const char *cellsname, int idx, struct of_phandle_args *args)
3741 {
3742 uint32_t phandles[16] = {};
3743 int i, len, node;
3744
3745 len = OF_getpropintarray((uintptr_t)np, (char *)propname, phandles,
3746 sizeof(phandles));
3747 if (len < (idx + 1) * sizeof(uint32_t))
3748 return -ENOENT;
3749
3750 node = OF_getnodebyphandle(phandles[idx]);
3751 if (node == 0)
3752 return -ENOENT;
3753
3754 args->np = (struct device_node *)(uintptr_t)node;
3755 args->args_count = OF_getpropint(node, (char *)cellsname, 0);
3756 for (i = 0; i < args->args_count; i++)
3757 args->args[i] = phandles[i + 1];
3758
3759 return 0;
3760 }
3761
3762 int
of_address_to_resource(struct device_node * np,int idx,struct resource * res)3763 of_address_to_resource(struct device_node *np, int idx, struct resource *res)
3764 {
3765 uint64_t reg[16] = {};
3766 int len;
3767
3768 KASSERT(idx < 8);
3769
3770 len = OF_getpropint64array((uintptr_t)np, "reg", reg, sizeof(reg));
3771 if (len < 0 || idx >= (len / (2 * sizeof(uint64_t))))
3772 return -EINVAL;
3773
3774 res->start = reg[2 * idx];
3775 res->end = reg[2 * idx] + reg[2 * idx + 1] - 1;
3776
3777 return 0;
3778 }
3779
/*
 * Successor of node in a pre-order walk, at the sibling level:
 * return the next sibling, climbing to parents while the current
 * node is the last of its siblings.  Returns 0 at end of tree.
 */
static int
next_node(int node)
{
	int peer;

	for (peer = OF_peer(node); node != 0 && peer == 0;) {
		node = OF_parent(node);
		if (node != 0)
			peer = OF_peer(node);
	}

	return peer;
}
3793
3794 static int
find_matching_node(int node,const struct of_device_id * id)3795 find_matching_node(int node, const struct of_device_id *id)
3796 {
3797 int child, match;
3798 int i;
3799
3800 for (child = OF_child(node); child; child = OF_peer(child)) {
3801 match = find_matching_node(child, id);
3802 if (match)
3803 return match;
3804 }
3805
3806 for (i = 0; id[i].compatible; i++) {
3807 if (OF_is_compatible(node, id[i].compatible))
3808 return node;
3809 }
3810
3811 return 0;
3812 }
3813
3814 struct device_node *
__matching_node(struct device_node * np,const struct of_device_id * id)3815 __matching_node(struct device_node *np, const struct of_device_id *id)
3816 {
3817 int node = OF_peer(0);
3818 int match;
3819
3820 if (np)
3821 node = next_node((uintptr_t)np);
3822 while (node) {
3823 match = find_matching_node(node, id);
3824 if (match)
3825 return (struct device_node *)(uintptr_t)match;
3826 node = next_node(node);
3827 }
3828
3829 return NULL;
3830 }
3831
3832 struct platform_device *
of_platform_device_create(struct device_node * np,const char * bus_id,struct device * parent)3833 of_platform_device_create(struct device_node *np, const char *bus_id,
3834 struct device *parent)
3835 {
3836 struct platform_device *pdev;
3837
3838 pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
3839 pdev->node = (intptr_t)np;
3840 pdev->parent = parent;
3841
3842 LIST_INSERT_HEAD(&pdev_list, pdev, next);
3843
3844 return pdev;
3845 }
3846
3847 struct platform_device *
of_find_device_by_node(struct device_node * np)3848 of_find_device_by_node(struct device_node *np)
3849 {
3850 struct platform_device *pdev;
3851
3852 LIST_FOREACH(pdev, &pdev_list, next) {
3853 if (pdev->node == (intptr_t)np)
3854 return pdev;
3855 }
3856
3857 return NULL;
3858 }
3859
3860 int
of_device_is_available(struct device_node * np)3861 of_device_is_available(struct device_node *np)
3862 {
3863 char status[32];
3864
3865 if (OF_getprop((uintptr_t)np, "status", status, sizeof(status)) > 0 &&
3866 strcmp(status, "disabled") == 0)
3867 return 0;
3868
3869 return 1;
3870 }
3871
3872 int
of_dma_configure(struct device * dev,struct device_node * np,int force_dma)3873 of_dma_configure(struct device *dev, struct device_node *np, int force_dma)
3874 {
3875 struct platform_device *pdev = (struct platform_device *)dev;
3876 bus_dma_tag_t dmat = dma_tag_lookup(pdev->parent);
3877
3878 pdev->dmat = iommu_device_map(pdev->node, dmat);
3879 return 0;
3880 }
3881
3882 struct device_node *
__of_get_compatible_child(void * p,const char * compat)3883 __of_get_compatible_child(void *p, const char *compat)
3884 {
3885 struct device *dev = container_of(p, struct device, of_node);
3886 struct platform_device *pdev = (struct platform_device *)dev;
3887 int child;
3888
3889 for (child = OF_child(pdev->node); child; child = OF_peer(child)) {
3890 if (OF_is_compatible(child, compat))
3891 return (struct device_node *)(uintptr_t)child;
3892 }
3893 return NULL;
3894 }
3895
3896 struct device_node *
__of_get_child_by_name(void * p,const char * name)3897 __of_get_child_by_name(void *p, const char *name)
3898 {
3899 struct device *dev = container_of(p, struct device, of_node);
3900 struct platform_device *pdev = (struct platform_device *)dev;
3901 int child;
3902
3903 child = OF_getnodebyname(pdev->node, name);
3904 if (child == 0)
3905 return NULL;
3906 return (struct device_node *)(uintptr_t)child;
3907 }
3908
3909 int
component_compare_of(struct device * dev,void * data)3910 component_compare_of(struct device *dev, void *data)
3911 {
3912 struct platform_device *pdev = (struct platform_device *)dev;
3913
3914 return (pdev->node == (intptr_t)data);
3915 }
3916
3917 void
drm_of_component_match_add(struct device * master,struct component_match ** matchptr,int (* compare)(struct device *,void *),struct device_node * np)3918 drm_of_component_match_add(struct device *master,
3919 struct component_match **matchptr,
3920 int (*compare)(struct device *, void *),
3921 struct device_node *np)
3922 {
3923 struct component_match *match = *matchptr;
3924
3925 if (match == NULL) {
3926 match = malloc(sizeof(struct component_match),
3927 M_DEVBUF, M_WAITOK | M_ZERO);
3928 *matchptr = match;
3929 }
3930
3931 KASSERT(match->nmatches < nitems(match->match));
3932 match->match[match->nmatches].compare = compare;
3933 match->match[match->nmatches].data = np;
3934 match->nmatches++;
3935 }
3936
3937 #endif
3938