1 /*	$OpenBSD: drm_linux.c,v 1.109 2024/01/21 13:36:40 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/dma-fence-chain.h>
39 #include <linux/interrupt.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/scatterlist.h>
43 #include <linux/i2c.h>
44 #include <linux/pci.h>
45 #include <linux/notifier.h>
46 #include <linux/backlight.h>
47 #include <linux/shrinker.h>
48 #include <linux/fb.h>
49 #include <linux/xarray.h>
50 #include <linux/interval_tree.h>
51 #include <linux/kthread.h>
52 #include <linux/processor.h>
53 #include <linux/sync_file.h>
54 
55 #include <drm/drm_device.h>
56 #include <drm/drm_connector.h>
57 #include <drm/drm_print.h>
58 
59 #if defined(__amd64__) || defined(__i386__)
60 #include "bios.h"
61 #endif
62 
63 /* allowed to sleep */
64 void
65 tasklet_unlock_wait(struct tasklet_struct *ts)
66 {
67 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
68 		cpu_relax();
69 }
70 
71 /* must not sleep */
72 void
73 tasklet_unlock_spin_wait(struct tasklet_struct *ts)
74 {
75 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
76 		cpu_relax();
77 }
78 
79 void
80 tasklet_run(void *arg)
81 {
82 	struct tasklet_struct *ts = arg;
83 
84 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
85 	if (tasklet_trylock(ts)) {
86 		if (!atomic_read(&ts->count)) {
87 			if (ts->use_callback)
88 				ts->callback(ts);
89 			else
90 				ts->func(ts->data);
91 		}
92 		tasklet_unlock(ts);
93 	}
94 }
95 
96 /* 32 bit powerpc lacks 64 bit atomics */
97 #if defined(__powerpc__) && !defined(__powerpc64__)
98 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
99 #endif
100 
101 void
102 set_current_state(int state)
103 {
104 	int prio = state;
105 
106 	KASSERT(state != TASK_RUNNING);
107 	/* check if already on the sleep list */
108 	if (curproc->p_wchan != NULL)
109 		return;
110 	sleep_setup(curproc, prio, "schto");
111 }
112 
113 void
114 __set_current_state(int state)
115 {
116 	struct proc *p = curproc;
117 	int s;
118 
119 	KASSERT(state == TASK_RUNNING);
120 	SCHED_LOCK(s);
121 	unsleep(p);
122 	p->p_stat = SONPROC;
123 	atomic_clearbits_int(&p->p_flag, P_WSLEEP);
124 	SCHED_UNLOCK(s);
125 }
126 
127 void
128 schedule(void)
129 {
130 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
131 }
132 
133 long
134 schedule_timeout(long timeout)
135 {
136 	unsigned long deadline;
137 	int timo = 0;
138 
139 	KASSERT(!cold);
140 
141 	if (timeout != MAX_SCHEDULE_TIMEOUT)
142 		timo = timeout;
143 	if (timeout != MAX_SCHEDULE_TIMEOUT)
144 		deadline = jiffies + timeout;
145 	sleep_finish(timo, timeout > 0);
146 	if (timeout != MAX_SCHEDULE_TIMEOUT)
147 		timeout = deadline - jiffies;
148 
149 	return timeout > 0 ? timeout : 0;
150 }
151 
152 long
153 schedule_timeout_uninterruptible(long timeout)
154 {
155 	tsleep(curproc, PWAIT, "schtou", timeout);
156 	return 0;
157 }
158 
159 int
160 wake_up_process(struct proc *p)
161 {
162 	int s, rv;
163 
164 	SCHED_LOCK(s);
165 	rv = wakeup_proc(p, NULL, 0);
166 	SCHED_UNLOCK(s);
167 	return rv;
168 }
169 
170 int
171 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
172     int sync, void *key)
173 {
174 	if (wqe->private)
175 		wake_up_process(wqe->private);
176 	list_del_init(&wqe->entry);
177 	return 0;
178 }
179 
180 void
181 prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
182 {
183 	mtx_enter(&wqh->lock);
184 	if (list_empty(&wqe->entry))
185 		__add_wait_queue(wqh, wqe);
186 	mtx_leave(&wqh->lock);
187 
188 	set_current_state(state);
189 }
190 
191 void
192 finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
193 {
194 	__set_current_state(TASK_RUNNING);
195 
196 	mtx_enter(&wqh->lock);
197 	if (!list_empty(&wqe->entry))
198 		list_del_init(&wqe->entry);
199 	mtx_leave(&wqh->lock);
200 }
201 
202 void
203 flush_workqueue(struct workqueue_struct *wq)
204 {
205 	if (cold)
206 		return;
207 
208 	if (wq)
209 		taskq_barrier((struct taskq *)wq);
210 }
211 
212 bool
213 flush_work(struct work_struct *work)
214 {
215 	if (cold)
216 		return false;
217 
218 	if (work->tq)
219 		taskq_barrier(work->tq);
220 	return false;
221 }
222 
223 bool
224 flush_delayed_work(struct delayed_work *dwork)
225 {
226 	bool ret = false;
227 
228 	if (cold)
229 		return false;
230 
231 	while (timeout_pending(&dwork->to)) {
232 		tsleep(dwork, PWAIT, "fldwto", 1);
233 		ret = true;
234 	}
235 
236 	if (dwork->tq)
237 		taskq_barrier(dwork->tq);
238 	return ret;
239 }
240 
241 struct kthread {
242 	int (*func)(void *);
243 	void *data;
244 	struct proc *proc;
245 	volatile u_int flags;
246 #define KTHREAD_SHOULDSTOP	0x0000001
247 #define KTHREAD_STOPPED		0x0000002
248 #define KTHREAD_SHOULDPARK	0x0000004
249 #define KTHREAD_PARKED		0x0000008
250 	LIST_ENTRY(kthread) next;
251 };
252 
253 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
254 
255 void
256 kthread_func(void *arg)
257 {
258 	struct kthread *thread = arg;
259 	int ret;
260 
261 	ret = thread->func(thread->data);
262 	thread->flags |= KTHREAD_STOPPED;
263 	wakeup(thread);
264 	kthread_exit(ret);
265 }
266 
267 struct proc *
268 kthread_run(int (*func)(void *), void *data, const char *name)
269 {
270 	struct kthread *thread;
271 
272 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
273 	thread->func = func;
274 	thread->data = data;
275 	thread->flags = 0;
276 
277 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
278 		free(thread, M_DRM, sizeof(*thread));
279 		return ERR_PTR(-ENOMEM);
280 	}
281 
282 	LIST_INSERT_HEAD(&kthread_list, thread, next);
283 	return thread->proc;
284 }
285 
286 struct kthread_worker *
287 kthread_create_worker(unsigned int flags, const char *fmt, ...)
288 {
289 	char name[MAXCOMLEN+1];
290 	va_list ap;
291 
292 	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
293 	va_start(ap, fmt);
294 	vsnprintf(name, sizeof(name), fmt, ap);
295 	va_end(ap);
296 	w->tq = taskq_create(name, 1, IPL_HIGH, 0);
297 
298 	return w;
299 }
300 
301 void
302 kthread_destroy_worker(struct kthread_worker *worker)
303 {
304 	taskq_destroy(worker->tq);
305 	free(worker, M_DRM, sizeof(*worker));
306 
307 }
308 
309 void
310 kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
311 {
312 	work->tq = NULL;
313 	task_set(&work->task, (void (*)(void *))func, work);
314 }
315 
316 bool
317 kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
318 {
319 	work->tq = worker->tq;
320 	return task_add(work->tq, &work->task);
321 }
322 
323 bool
324 kthread_cancel_work_sync(struct kthread_work *work)
325 {
326 	return task_del(work->tq, &work->task);
327 }
328 
329 void
330 kthread_flush_work(struct kthread_work *work)
331 {
332 	if (cold)
333 		return;
334 
335 	if (work->tq)
336 		taskq_barrier(work->tq);
337 }
338 
339 void
340 kthread_flush_worker(struct kthread_worker *worker)
341 {
342 	if (cold)
343 		return;
344 
345 	if (worker->tq)
346 		taskq_barrier(worker->tq);
347 }
348 
349 struct kthread *
350 kthread_lookup(struct proc *p)
351 {
352 	struct kthread *thread;
353 
354 	LIST_FOREACH(thread, &kthread_list, next) {
355 		if (thread->proc == p)
356 			break;
357 	}
358 	KASSERT(thread);
359 
360 	return thread;
361 }
362 
363 int
364 kthread_should_park(void)
365 {
366 	struct kthread *thread = kthread_lookup(curproc);
367 	return (thread->flags & KTHREAD_SHOULDPARK);
368 }
369 
370 void
371 kthread_parkme(void)
372 {
373 	struct kthread *thread = kthread_lookup(curproc);
374 
375 	while (thread->flags & KTHREAD_SHOULDPARK) {
376 		thread->flags |= KTHREAD_PARKED;
377 		wakeup(thread);
378 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
379 		thread->flags &= ~KTHREAD_PARKED;
380 	}
381 }
382 
383 void
384 kthread_park(struct proc *p)
385 {
386 	struct kthread *thread = kthread_lookup(p);
387 
388 	while ((thread->flags & KTHREAD_PARKED) == 0) {
389 		thread->flags |= KTHREAD_SHOULDPARK;
390 		wake_up_process(thread->proc);
391 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
392 	}
393 }
394 
395 void
396 kthread_unpark(struct proc *p)
397 {
398 	struct kthread *thread = kthread_lookup(p);
399 
400 	thread->flags &= ~KTHREAD_SHOULDPARK;
401 	wakeup(thread);
402 }
403 
404 int
405 kthread_should_stop(void)
406 {
407 	struct kthread *thread = kthread_lookup(curproc);
408 	return (thread->flags & KTHREAD_SHOULDSTOP);
409 }
410 
411 void
412 kthread_stop(struct proc *p)
413 {
414 	struct kthread *thread = kthread_lookup(p);
415 
416 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
417 		thread->flags |= KTHREAD_SHOULDSTOP;
418 		kthread_unpark(p);
419 		wake_up_process(thread->proc);
420 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
421 	}
422 	LIST_REMOVE(thread, next);
423 	free(thread, M_DRM, sizeof(*thread));
424 }
425 
426 #if NBIOS > 0
427 extern char smbios_board_vendor[];
428 extern char smbios_board_prod[];
429 extern char smbios_board_serial[];
430 #endif
431 
432 bool
433 dmi_match(int slot, const char *str)
434 {
435 	switch (slot) {
436 	case DMI_SYS_VENDOR:
437 		if (hw_vendor != NULL &&
438 		    !strcmp(hw_vendor, str))
439 			return true;
440 		break;
441 	case DMI_PRODUCT_NAME:
442 		if (hw_prod != NULL &&
443 		    !strcmp(hw_prod, str))
444 			return true;
445 		break;
446 	case DMI_PRODUCT_VERSION:
447 		if (hw_ver != NULL &&
448 		    !strcmp(hw_ver, str))
449 			return true;
450 		break;
451 #if NBIOS > 0
452 	case DMI_BOARD_VENDOR:
453 		if (strcmp(smbios_board_vendor, str) == 0)
454 			return true;
455 		break;
456 	case DMI_BOARD_NAME:
457 		if (strcmp(smbios_board_prod, str) == 0)
458 			return true;
459 		break;
460 	case DMI_BOARD_SERIAL:
461 		if (strcmp(smbios_board_serial, str) == 0)
462 			return true;
463 		break;
464 #else
465 	case DMI_BOARD_VENDOR:
466 		if (hw_vendor != NULL &&
467 		    !strcmp(hw_vendor, str))
468 			return true;
469 		break;
470 	case DMI_BOARD_NAME:
471 		if (hw_prod != NULL &&
472 		    !strcmp(hw_prod, str))
473 			return true;
474 		break;
475 #endif
476 	case DMI_NONE:
477 	default:
478 		return false;
479 	}
480 
481 	return false;
482 }
483 
484 static bool
485 dmi_found(const struct dmi_system_id *dsi)
486 {
487 	int i, slot;
488 
489 	for (i = 0; i < nitems(dsi->matches); i++) {
490 		slot = dsi->matches[i].slot;
491 		if (slot == DMI_NONE)
492 			break;
493 		if (!dmi_match(slot, dsi->matches[i].substr))
494 			return false;
495 	}
496 
497 	return true;
498 }
499 
500 const struct dmi_system_id *
501 dmi_first_match(const struct dmi_system_id *sysid)
502 {
503 	const struct dmi_system_id *dsi;
504 
505 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
506 		if (dmi_found(dsi))
507 			return dsi;
508 	}
509 
510 	return NULL;
511 }
512 
513 #if NBIOS > 0
514 extern char smbios_bios_date[];
515 extern char smbios_bios_version[];
516 #endif
517 
518 const char *
519 dmi_get_system_info(int slot)
520 {
521 #if NBIOS > 0
522 	switch (slot) {
523 	case DMI_BIOS_DATE:
524 		return smbios_bios_date;
525 	case DMI_BIOS_VERSION:
526 		return smbios_bios_version;
527 	default:
528 		printf("%s slot %d not handled\n", __func__, slot);
529 	}
530 #endif
531 	return NULL;
532 }
533 
534 int
535 dmi_check_system(const struct dmi_system_id *sysid)
536 {
537 	const struct dmi_system_id *dsi;
538 	int num = 0;
539 
540 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
541 		if (dmi_found(dsi)) {
542 			num++;
543 			if (dsi->callback && dsi->callback(dsi))
544 				break;
545 		}
546 	}
547 	return (num);
548 }
549 
550 struct vm_page *
551 alloc_pages(unsigned int gfp_mask, unsigned int order)
552 {
553 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
554 	struct uvm_constraint_range *constraint = &no_constraint;
555 	struct pglist mlist;
556 
557 	if (gfp_mask & M_CANFAIL)
558 		flags |= UVM_PLA_FAILOK;
559 	if (gfp_mask & M_ZERO)
560 		flags |= UVM_PLA_ZERO;
561 	if (gfp_mask & __GFP_DMA32)
562 		constraint = &dma_constraint;
563 
564 	TAILQ_INIT(&mlist);
565 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
566 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
567 		return NULL;
568 	return TAILQ_FIRST(&mlist);
569 }
570 
571 void
572 __free_pages(struct vm_page *page, unsigned int order)
573 {
574 	struct pglist mlist;
575 	int i;
576 
577 	TAILQ_INIT(&mlist);
578 	for (i = 0; i < (1 << order); i++)
579 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
580 	uvm_pglistfree(&mlist);
581 }
582 
583 void
584 __pagevec_release(struct pagevec *pvec)
585 {
586 	struct pglist mlist;
587 	int i;
588 
589 	TAILQ_INIT(&mlist);
590 	for (i = 0; i < pvec->nr; i++)
591 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
592 	uvm_pglistfree(&mlist);
593 	pagevec_reinit(pvec);
594 }
595 
596 static struct kmem_va_mode kv_physwait = {
597 	.kv_map = &phys_map,
598 	.kv_wait = 1,
599 };
600 
601 void *
602 kmap(struct vm_page *pg)
603 {
604 	vaddr_t va;
605 
606 #if defined (__HAVE_PMAP_DIRECT)
607 	va = pmap_map_direct(pg);
608 #else
609 	va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
610 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
611 	pmap_update(pmap_kernel());
612 #endif
613 	return (void *)va;
614 }
615 
616 void
617 kunmap_va(void *addr)
618 {
619 	vaddr_t va = (vaddr_t)addr;
620 
621 #if defined (__HAVE_PMAP_DIRECT)
622 	pmap_unmap_direct(va);
623 #else
624 	pmap_kremove(va, PAGE_SIZE);
625 	pmap_update(pmap_kernel());
626 	km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
627 #endif
628 }
629 
630 vaddr_t kmap_atomic_va;
631 int kmap_atomic_inuse;
632 
633 void *
634 kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
635 {
636 	KASSERT(!kmap_atomic_inuse);
637 
638 	kmap_atomic_inuse = 1;
639 	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
640 	    PROT_READ | PROT_WRITE);
641 	return (void *)kmap_atomic_va;
642 }
643 
644 void
645 kunmap_atomic(void *addr)
646 {
647 	KASSERT(kmap_atomic_inuse);
648 
649 	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
650 	kmap_atomic_inuse = 0;
651 }
652 
653 void *
654 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
655      pgprot_t prot)
656 {
657 	vaddr_t va;
658 	paddr_t pa;
659 	int i;
660 
661 	va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
662 	    &kd_nowait);
663 	if (va == 0)
664 		return NULL;
665 	for (i = 0; i < npages; i++) {
666 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
667 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
668 		    PROT_READ | PROT_WRITE,
669 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
670 		pmap_update(pmap_kernel());
671 	}
672 
673 	return (void *)va;
674 }
675 
676 void
677 vunmap(void *addr, size_t size)
678 {
679 	vaddr_t va = (vaddr_t)addr;
680 
681 	pmap_remove(pmap_kernel(), va, va + size);
682 	pmap_update(pmap_kernel());
683 	km_free((void *)va, size, &kv_any, &kp_none);
684 }
685 
686 bool
687 is_vmalloc_addr(const void *p)
688 {
689 	vaddr_t min, max, addr;
690 
691 	min = vm_map_min(kernel_map);
692 	max = vm_map_max(kernel_map);
693 	addr = (vaddr_t)p;
694 
695 	if (addr >= min && addr <= max)
696 		return true;
697 	else
698 		return false;
699 }
700 
701 void
702 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
703     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
704 {
705 	const uint8_t *cbuf = buf;
706 	int i;
707 
708 	for (i = 0; i < len; i++) {
709 		if ((i % rowsize) == 0)
710 			printf("%s", prefix_str);
711 		printf("%02x", cbuf[i]);
712 		if ((i % rowsize) == (rowsize - 1))
713 			printf("\n");
714 		else
715 			printf(" ");
716 	}
717 }
718 
719 void *
720 memchr_inv(const void *s, int c, size_t n)
721 {
722 	if (n != 0) {
723 		const unsigned char *p = s;
724 
725 		do {
726 			if (*p++ != (unsigned char)c)
727 				return ((void *)(p - 1));
728 		} while (--n != 0);
729 	}
730 	return (NULL);
731 }
732 
733 int
734 panic_cmp(struct rb_node *a, struct rb_node *b)
735 {
736 	panic(__func__);
737 }
738 
739 #undef RB_ROOT
740 #define RB_ROOT(head)	(head)->rbh_root
741 
742 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
743 
744 /*
745  * This is a fairly minimal implementation of the Linux "idr" API.  It
746  * probably isn't very efficient, and definitely isn't RCU safe.  The
747  * pre-load buffer is global instead of per-cpu; we rely on the kernel
748  * lock to make this work.  ID randomization (to make the IDs harder
749  * to guess) is implemented but currently disabled under "#ifdef notyet".
750  */
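
/*
 * Illustrative sketch only, not part of the driver: typical use of this
 * idr emulation, assuming the caller runs under the kernel lock as
 * required above.  "obj" and "name_idr" are placeholder names.
 *
 *	struct idr name_idr;
 *	int handle;
 *
 *	idr_init(&name_idr);
 *	idr_preload(GFP_KERNEL);
 *	handle = idr_alloc(&name_idr, obj, 1, 0, GFP_KERNEL);
 *	if (handle < 0)
 *		return handle;		(-ENOMEM or -ENOSPC)
 *	...
 *	obj = idr_find(&name_idr, handle);
 *	idr_remove(&name_idr, handle);
 *	idr_destroy(&name_idr);
 */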
751 
752 int idr_cmp(struct idr_entry *, struct idr_entry *);
753 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
754 
755 struct pool idr_pool;
756 struct idr_entry *idr_entry_cache;
757 
758 void
759 idr_init(struct idr *idr)
760 {
761 	SPLAY_INIT(&idr->tree);
762 }
763 
764 void
765 idr_destroy(struct idr *idr)
766 {
767 	struct idr_entry *id;
768 
769 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
770 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
771 		pool_put(&idr_pool, id);
772 	}
773 }
774 
775 void
776 idr_preload(unsigned int gfp_mask)
777 {
778 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
779 
780 	KERNEL_ASSERT_LOCKED();
781 
782 	if (idr_entry_cache == NULL)
783 		idr_entry_cache = pool_get(&idr_pool, flags);
784 }
785 
786 int
787 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
788 {
789 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
790 	struct idr_entry *id;
791 	int begin;
792 
793 	KERNEL_ASSERT_LOCKED();
794 
795 	if (idr_entry_cache) {
796 		id = idr_entry_cache;
797 		idr_entry_cache = NULL;
798 	} else {
799 		id = pool_get(&idr_pool, flags);
800 		if (id == NULL)
801 			return -ENOMEM;
802 	}
803 
804 	if (end <= 0)
805 		end = INT_MAX;
806 
807 #ifdef notyet
808 	id->id = begin = start + arc4random_uniform(end - start);
809 #else
810 	id->id = begin = start;
811 #endif
812 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
813 		if (id->id == end)
814 			id->id = start;
815 		else
816 			id->id++;
817 		if (id->id == begin) {
818 			pool_put(&idr_pool, id);
819 			return -ENOSPC;
820 		}
821 	}
822 	id->ptr = ptr;
823 	return id->id;
824 }
825 
826 void *
827 idr_replace(struct idr *idr, void *ptr, unsigned long id)
828 {
829 	struct idr_entry find, *res;
830 	void *old;
831 
832 	find.id = id;
833 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
834 	if (res == NULL)
835 		return ERR_PTR(-ENOENT);
836 	old = res->ptr;
837 	res->ptr = ptr;
838 	return old;
839 }
840 
841 void *
842 idr_remove(struct idr *idr, unsigned long id)
843 {
844 	struct idr_entry find, *res;
845 	void *ptr = NULL;
846 
847 	find.id = id;
848 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
849 	if (res) {
850 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
851 		ptr = res->ptr;
852 		pool_put(&idr_pool, res);
853 	}
854 	return ptr;
855 }
856 
857 void *
858 idr_find(struct idr *idr, unsigned long id)
859 {
860 	struct idr_entry find, *res;
861 
862 	find.id = id;
863 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
864 	if (res == NULL)
865 		return NULL;
866 	return res->ptr;
867 }
868 
869 void *
870 idr_get_next(struct idr *idr, int *id)
871 {
872 	struct idr_entry *res;
873 
874 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
875 		if (res->id >= *id) {
876 			*id = res->id;
877 			return res->ptr;
878 		}
879 	}
880 
881 	return NULL;
882 }
883 
884 int
885 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
886 {
887 	struct idr_entry *id;
888 	int ret;
889 
890 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
891 		ret = func(id->id, id->ptr, data);
892 		if (ret)
893 			return ret;
894 	}
895 
896 	return 0;
897 }
898 
899 int
900 idr_cmp(struct idr_entry *a, struct idr_entry *b)
901 {
902 	return (a->id < b->id ? -1 : a->id > b->id);
903 }
904 
905 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
906 
907 void
908 ida_init(struct ida *ida)
909 {
910 	idr_init(&ida->idr);
911 }
912 
913 void
914 ida_destroy(struct ida *ida)
915 {
916 	idr_destroy(&ida->idr);
917 }
918 
919 int
920 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
921     gfp_t gfp_mask)
922 {
923 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
924 }
925 
926 void
927 ida_simple_remove(struct ida *ida, unsigned int id)
928 {
929 	idr_remove(&ida->idr, id);
930 }
931 
932 int
933 ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
934 {
935 	return idr_alloc(&ida->idr, NULL, min, INT_MAX, gfp);
936 }
937 
938 int
939 ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
940 {
941 	return idr_alloc(&ida->idr, NULL, 0, max - 1, gfp);
942 }
943 
944 void
945 ida_free(struct ida *ida, unsigned int id)
946 {
947 	idr_remove(&ida->idr, id);
948 }
949 
950 int
951 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
952 {
953 	return (a->id < b->id ? -1 : a->id > b->id);
954 }
955 
956 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
957 struct pool xa_pool;
958 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
959 
960 void
961 xa_init_flags(struct xarray *xa, gfp_t flags)
962 {
963 	static int initialized;
964 
965 	if (!initialized) {
966 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0,
967 		    "xapl", NULL);
968 		initialized = 1;
969 	}
970 	SPLAY_INIT(&xa->xa_tree);
971 	if (flags & XA_FLAGS_LOCK_IRQ)
972 		mtx_init(&xa->xa_lock, IPL_TTY);
973 	else
974 		mtx_init(&xa->xa_lock, IPL_NONE);
975 }
976 
977 void
978 xa_destroy(struct xarray *xa)
979 {
980 	struct xarray_entry *id;
981 
982 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
983 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
984 		pool_put(&xa_pool, id);
985 	}
986 }
987 
988 /* Don't wrap ids. */
989 int
990 __xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
991 {
992 	struct xarray_entry *xid;
993 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
994 	int begin;
995 
996 	if (gfp & GFP_NOWAIT) {
997 		xid = pool_get(&xa_pool, PR_NOWAIT);
998 	} else {
999 		mtx_leave(&xa->xa_lock);
1000 		xid = pool_get(&xa_pool, PR_WAITOK);
1001 		mtx_enter(&xa->xa_lock);
1002 	}
1003 
1004 	if (xid == NULL)
1005 		return -ENOMEM;
1006 
1007 	if (limit <= 0)
1008 		limit = INT_MAX;
1009 
1010 	xid->id = begin = start;
1011 
1012 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
1013 		if (xid->id == limit)
1014 			xid->id = start;
1015 		else
1016 			xid->id++;
1017 		if (xid->id == begin) {
1018 			pool_put(&xa_pool, xid);
1019 			return -EBUSY;
1020 		}
1021 	}
1022 	xid->ptr = entry;
1023 	*id = xid->id;
1024 	return 0;
1025 }
1026 
1027 /*
1028  * Wrap ids and store next id.
1029  * We walk the entire tree, so we don't special-case wrapping.
1030  * The only caller of this (i915_drm_client.c) doesn't use next id.
1031  */
1032 int
1033 __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, int limit, u32 *next,
1034     gfp_t gfp)
1035 {
1036 	int r = __xa_alloc(xa, id, entry, limit, gfp);
1037 	*next = *id + 1;
1038 	return r;
1039 }
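
/*
 * Illustrative sketch only, not taken from an in-tree caller: allocating
 * a fresh id with the cyclic variant.  The xa_lock is entered directly
 * because __xa_alloc()/__xa_alloc_cyclic() expect it to be held; "xa",
 * "entry" and "limit" are placeholder names.
 *
 *	u32 id, next;
 *	int error;
 *
 *	mtx_enter(&xa->xa_lock);
 *	error = __xa_alloc_cyclic(xa, &id, entry, limit, &next, GFP_KERNEL);
 *	mtx_leave(&xa->xa_lock);
 *	if (error)
 *		return error;
 *	("next" is stored but, as noted above, typically unused)
 */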
1040 
1041 void *
1042 __xa_erase(struct xarray *xa, unsigned long index)
1043 {
1044 	struct xarray_entry find, *res;
1045 	void *ptr = NULL;
1046 
1047 	find.id = index;
1048 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1049 	if (res) {
1050 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
1051 		ptr = res->ptr;
1052 		pool_put(&xa_pool, res);
1053 	}
1054 	return ptr;
1055 }
1056 
1057 void *
1058 __xa_load(struct xarray *xa, unsigned long index)
1059 {
1060 	struct xarray_entry find, *res;
1061 
1062 	find.id = index;
1063 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1064 	if (res == NULL)
1065 		return NULL;
1066 	return res->ptr;
1067 }
1068 
1069 void *
1070 __xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1071 {
1072 	struct xarray_entry find, *res;
1073 	void *prev;
1074 
1075 	if (entry == NULL)
1076 		return __xa_erase(xa, index);
1077 
1078 	find.id = index;
1079 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1080 	if (res != NULL) {
1081 		/* index exists */
1082 		/* XXX Multislot entries updates not implemented yet */
1083 		prev = res->ptr;
1084 		res->ptr = entry;
1085 		return prev;
1086 	}
1087 
1088 	/* index not found, add new */
1089 	if (gfp & GFP_NOWAIT) {
1090 		res = pool_get(&xa_pool, PR_NOWAIT);
1091 	} else {
1092 		mtx_leave(&xa->xa_lock);
1093 		res = pool_get(&xa_pool, PR_WAITOK);
1094 		mtx_enter(&xa->xa_lock);
1095 	}
1096 	if (res == NULL)
1097 		return XA_ERROR(-ENOMEM);
1098 	res->id = index;
1099 	res->ptr = entry;
1100 	if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL)
1101 		return XA_ERROR(-EINVAL);
1102 	return NULL; /* no prev entry at index */
1103 }
1104 
1105 void *
1106 xa_get_next(struct xarray *xa, unsigned long *index)
1107 {
1108 	struct xarray_entry *res;
1109 
1110 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
1111 		if (res->id >= *index) {
1112 			*index = res->id;
1113 			return res->ptr;
1114 		}
1115 	}
1116 
1117 	return NULL;
1118 }
1119 
1120 int
1121 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
1122 {
1123 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
1124 	    M_DRM, gfp_mask | M_ZERO);
1125 	if (table->sgl == NULL)
1126 		return -ENOMEM;
1127 	table->nents = table->orig_nents = nents;
1128 	sg_mark_end(&table->sgl[nents - 1]);
1129 	return 0;
1130 }
1131 
1132 void
1133 sg_free_table(struct sg_table *table)
1134 {
1135 	free(table->sgl, M_DRM,
1136 	    table->orig_nents * sizeof(struct scatterlist));
1137 	table->orig_nents = 0;
1138 	table->sgl = NULL;
1139 }
1140 
1141 size_t
1142 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1143     const void *buf, size_t buflen)
1144 {
1145 	panic("%s", __func__);
1146 }
1147 
1148 int
1149 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1150 {
1151 	void *cmd = NULL;
1152 	int cmdlen = 0;
1153 	int err, ret = 0;
1154 	int op;
1155 
1156 	iic_acquire_bus(&adap->ic, 0);
1157 
1158 	while (num > 2) {
1159 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
1160 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
1161 		    msgs->buf, msgs->len, 0);
1162 		if (err) {
1163 			ret = -err;
1164 			goto fail;
1165 		}
1166 		msgs++;
1167 		num--;
1168 		ret++;
1169 	}
1170 
1171 	if (num > 1) {
1172 		cmd = msgs->buf;
1173 		cmdlen = msgs->len;
1174 		msgs++;
1175 		num--;
1176 		ret++;
1177 	}
1178 
1179 	op = (msgs->flags & I2C_M_RD) ?
1180 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1181 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1182 	    msgs->buf, msgs->len, 0);
1183 	if (err) {
1184 		ret = -err;
1185 		goto fail;
1186 	}
1187 	msgs++;
1188 	ret++;
1189 
1190 fail:
1191 	iic_release_bus(&adap->ic, 0);
1192 
1193 	return ret;
1194 }
1195 
1196 int
1197 __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1198 {
1199 	int ret, retries;
1200 
1201 	retries = adap->retries;
1202 retry:
1203 	if (adap->algo)
1204 		ret = adap->algo->master_xfer(adap, msgs, num);
1205 	else
1206 		ret = i2c_master_xfer(adap, msgs, num);
1207 	if (ret == -EAGAIN && retries > 0) {
1208 		retries--;
1209 		goto retry;
1210 	}
1211 
1212 	return ret;
1213 }
1214 
1215 int
1216 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1217 {
1218 	int ret;
1219 
1220 	if (adap->lock_ops)
1221 		adap->lock_ops->lock_bus(adap, 0);
1222 
1223 	ret = __i2c_transfer(adap, msgs, num);
1224 
1225 	if (adap->lock_ops)
1226 		adap->lock_ops->unlock_bus(adap, 0);
1227 
1228 	return ret;
1229 }
1230 
1231 int
1232 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1233 {
1234 	struct i2c_algo_bit_data *algo = adap->algo_data;
1235 	struct i2c_adapter bb;
1236 
1237 	memset(&bb, 0, sizeof(bb));
1238 	bb.ic = algo->ic;
1239 	bb.retries = adap->retries;
1240 	return i2c_master_xfer(&bb, msgs, num);
1241 }
1242 
1243 uint32_t
1244 i2c_bb_functionality(struct i2c_adapter *adap)
1245 {
1246 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1247 }
1248 
1249 struct i2c_algorithm i2c_bit_algo = {
1250 	.master_xfer = i2c_bb_master_xfer,
1251 	.functionality = i2c_bb_functionality
1252 };
1253 
1254 int
1255 i2c_bit_add_bus(struct i2c_adapter *adap)
1256 {
1257 	adap->algo = &i2c_bit_algo;
1258 	adap->retries = 3;
1259 
1260 	return 0;
1261 }
1262 
1263 #if defined(__amd64__) || defined(__i386__)
1264 
1265 /*
1266  * This is a minimal implementation of the Linux vga_get/vga_put
1267  * interface.  In all likelihood, it will only work for inteldrm(4) as
1268  * it assumes that if there is another active VGA device in the
1269  * system, it is sitting behind a PCI bridge.
1270  */
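
/*
 * Illustrative sketch only: a driver brackets accesses that must not race
 * another VGA device roughly like this.  "rsrc" stands for whatever
 * resource flag the caller normally passes; this implementation ignores it.
 *
 *	vga_get_uninterruptible(pdev, rsrc);
 *	... touch the VGA legacy resources ...
 *	vga_put(pdev, rsrc);
 */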
1271 
1272 extern int pci_enumerate_bus(struct pci_softc *,
1273     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1274 
1275 pcitag_t vga_bridge_tag;
1276 int vga_bridge_disabled;
1277 
1278 int
1279 vga_disable_bridge(struct pci_attach_args *pa)
1280 {
1281 	pcireg_t bhlc, bc;
1282 
1283 	if (pa->pa_domain != 0)
1284 		return 0;
1285 
1286 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1287 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1288 		return 0;
1289 
1290 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1291 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1292 		return 0;
1293 	bc &= ~PPB_BC_VGA_ENABLE;
1294 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1295 
1296 	vga_bridge_tag = pa->pa_tag;
1297 	vga_bridge_disabled = 1;
1298 
1299 	return 1;
1300 }
1301 
1302 void
1303 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1304 {
1305 	KASSERT(pdev->pci->sc_bridgetag == NULL);
1306 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1307 }
1308 
1309 void
1310 vga_put(struct pci_dev *pdev, int rsrc)
1311 {
1312 	pcireg_t bc;
1313 
1314 	if (!vga_bridge_disabled)
1315 		return;
1316 
1317 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1318 	bc |= PPB_BC_VGA_ENABLE;
1319 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1320 
1321 	vga_bridge_disabled = 0;
1322 }
1323 
1324 #endif
1325 
1326 /*
1327  * ACPI types and interfaces.
1328  */
1329 
1330 #ifdef __HAVE_ACPI
1331 #include "acpi.h"
1332 #endif
1333 
1334 #if NACPI > 0
1335 
1336 #include <dev/acpi/acpireg.h>
1337 #include <dev/acpi/acpivar.h>
1338 #include <dev/acpi/amltypes.h>
1339 #include <dev/acpi/dsdt.h>
1340 
1341 acpi_status
1342 acpi_get_table(const char *sig, int instance,
1343     struct acpi_table_header **hdr)
1344 {
1345 	struct acpi_softc *sc = acpi_softc;
1346 	struct acpi_q *entry;
1347 
1348 	KASSERT(instance == 1);
1349 
1350 	if (sc == NULL)
1351 		return AE_NOT_FOUND;
1352 
1353 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1354 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1355 			*hdr = entry->q_table;
1356 			return 0;
1357 		}
1358 	}
1359 
1360 	return AE_NOT_FOUND;
1361 }
1362 
1363 void
1364 acpi_put_table(struct acpi_table_header *hdr)
1365 {
1366 }
1367 
1368 acpi_status
1369 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1370 {
1371 	node = aml_searchname(node, name);
1372 	if (node == NULL)
1373 		return AE_NOT_FOUND;
1374 
1375 	*rnode = node;
1376 	return 0;
1377 }
1378 
1379 acpi_status
1380 acpi_get_name(acpi_handle node, int type,  struct acpi_buffer *buffer)
1381 {
1382 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1383 	KASSERT(type == ACPI_FULL_PATHNAME);
1384 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1385 	return 0;
1386 }
1387 
1388 acpi_status
1389 acpi_evaluate_object(acpi_handle node, const char *name,
1390     struct acpi_object_list *params, struct acpi_buffer *result)
1391 {
1392 	struct aml_value args[4], res;
1393 	union acpi_object *obj;
1394 	uint8_t *data;
1395 	int i;
1396 
1397 	KASSERT(params->count <= nitems(args));
1398 
1399 	for (i = 0; i < params->count; i++) {
1400 		args[i].type = params->pointer[i].type;
1401 		switch (args[i].type) {
1402 		case AML_OBJTYPE_INTEGER:
1403 			args[i].v_integer = params->pointer[i].integer.value;
1404 			break;
1405 		case AML_OBJTYPE_BUFFER:
1406 			args[i].length = params->pointer[i].buffer.length;
1407 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1408 			break;
1409 		default:
1410 			printf("%s: arg type 0x%02x\n", __func__, args[i].type);
1411 			return AE_BAD_PARAMETER;
1412 		}
1413 	}
1414 
1415 	if (name) {
1416 		node = aml_searchname(node, name);
1417 		if (node == NULL)
1418 			return AE_NOT_FOUND;
1419 	}
1420 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1421 		aml_freevalue(&res);
1422 		return AE_ERROR;
1423 	}
1424 
1425 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1426 
1427 	result->length = sizeof(union acpi_object);
1428 	switch (res.type) {
1429 	case AML_OBJTYPE_BUFFER:
1430 		result->length += res.length;
1431 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1432 		obj = (union acpi_object *)result->pointer;
1433 		data = (uint8_t *)(obj + 1);
1434 		obj->type = res.type;
1435 		obj->buffer.length = res.length;
1436 		obj->buffer.pointer = data;
1437 		memcpy(data, res.v_buffer, res.length);
1438 		break;
1439 	default:
1440 		printf("%s: return type 0x%02x\n", __func__, res.type);
1441 		aml_freevalue(&res);
1442 		return AE_ERROR;
1443 	}
1444 
1445 	aml_freevalue(&res);
1446 	return 0;
1447 }
1448 
1449 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1450 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1451 
1452 int
1453 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1454 {
1455 	struct acpi_bus_event event;
1456 	struct notifier_block *nb;
1457 
1458 	event.device_class = ACPI_VIDEO_CLASS;
1459 	event.type = notify;
1460 
1461 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1462 		nb->notifier_call(nb, 0, &event);
1463 	return 0;
1464 }
1465 
1466 int
1467 register_acpi_notifier(struct notifier_block *nb)
1468 {
1469 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1470 	return 0;
1471 }
1472 
1473 int
1474 unregister_acpi_notifier(struct notifier_block *nb)
1475 {
1476 	struct notifier_block *tmp;
1477 
1478 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1479 		if (tmp == nb) {
1480 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1481 			    notifier_block, link);
1482 			return 0;
1483 		}
1484 	}
1485 
1486 	return -ENOENT;
1487 }
1488 
1489 const char *
1490 acpi_format_exception(acpi_status status)
1491 {
1492 	switch (status) {
1493 	case AE_NOT_FOUND:
1494 		return "not found";
1495 	case AE_BAD_PARAMETER:
1496 		return "bad parameter";
1497 	default:
1498 		return "unknown";
1499 	}
1500 }
1501 
1502 #endif
1503 
1504 SLIST_HEAD(,backlight_device) backlight_device_list =
1505     SLIST_HEAD_INITIALIZER(backlight_device_list);
1506 
1507 void
1508 backlight_do_update_status(void *arg)
1509 {
1510 	backlight_update_status(arg);
1511 }
1512 
1513 struct backlight_device *
1514 backlight_device_register(const char *name, void *kdev, void *data,
1515     const struct backlight_ops *ops, const struct backlight_properties *props)
1516 {
1517 	struct backlight_device *bd;
1518 
1519 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1520 	bd->ops = ops;
1521 	bd->props = *props;
1522 	bd->data = data;
1523 
1524 	task_set(&bd->task, backlight_do_update_status, bd);
1525 
1526 	SLIST_INSERT_HEAD(&backlight_device_list, bd, next);
1527 	bd->name = name;
1528 
1529 	return bd;
1530 }
1531 
1532 void
1533 backlight_device_unregister(struct backlight_device *bd)
1534 {
1535 	SLIST_REMOVE(&backlight_device_list, bd, backlight_device, next);
1536 	free(bd, M_DRM, sizeof(*bd));
1537 }
1538 
1539 void
1540 backlight_schedule_update_status(struct backlight_device *bd)
1541 {
1542 	task_add(systq, &bd->task);
1543 }
1544 
1545 int
1546 backlight_enable(struct backlight_device *bd)
1547 {
1548 	if (bd == NULL)
1549 		return 0;
1550 
1551 	bd->props.power = FB_BLANK_UNBLANK;
1552 
1553 	return bd->ops->update_status(bd);
1554 }
1555 
1556 int
1557 backlight_disable(struct backlight_device *bd)
1558 {
1559 	if (bd == NULL)
1560 		return 0;
1561 
1562 	bd->props.power = FB_BLANK_POWERDOWN;
1563 
1564 	return bd->ops->update_status(bd);
1565 }
1566 
1567 struct backlight_device *
1568 backlight_device_get_by_name(const char *name)
1569 {
1570 	struct backlight_device *bd;
1571 
1572 	SLIST_FOREACH(bd, &backlight_device_list, next) {
1573 		if (strcmp(name, bd->name) == 0)
1574 			return bd;
1575 	}
1576 
1577 	return NULL;
1578 }
1579 
1580 struct drvdata {
1581 	struct device *dev;
1582 	void *data;
1583 	SLIST_ENTRY(drvdata) next;
1584 };
1585 
1586 SLIST_HEAD(,drvdata) drvdata_list = SLIST_HEAD_INITIALIZER(drvdata_list);
1587 
1588 void
1589 dev_set_drvdata(struct device *dev, void *data)
1590 {
1591 	struct drvdata *drvdata;
1592 
1593 	SLIST_FOREACH(drvdata, &drvdata_list, next) {
1594 		if (drvdata->dev == dev) {
1595 			drvdata->data = data;
1596 			return;
1597 		}
1598 	}
1599 
1600 	if (data == NULL)
1601 		return;
1602 
1603 	drvdata = malloc(sizeof(*drvdata), M_DRM, M_WAITOK);
1604 	drvdata->dev = dev;
1605 	drvdata->data = data;
1606 
1607 	SLIST_INSERT_HEAD(&drvdata_list, drvdata, next);
1608 }
1609 
1610 void *
1611 dev_get_drvdata(struct device *dev)
1612 {
1613 	struct drvdata *drvdata;
1614 
1615 	SLIST_FOREACH(drvdata, &drvdata_list, next) {
1616 		if (drvdata->dev == dev)
1617 			return drvdata->data;
1618 	}
1619 
1620 	return NULL;
1621 }
1622 
1623 void
1624 drm_sysfs_hotplug_event(struct drm_device *dev)
1625 {
1626 	knote_locked(&dev->note, NOTE_CHANGE);
1627 }
1628 
1629 void
1630 drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
1631 {
1632 	knote_locked(&connector->dev->note, NOTE_CHANGE);
1633 }
1634 
1635 void
1636 drm_sysfs_connector_status_event(struct drm_connector *connector,
1637     struct drm_property *property)
1638 {
1639 	STUB();
1640 }
1641 
1642 void
1643 drm_sysfs_connector_property_event(struct drm_connector *connector,
1644     struct drm_property *property)
1645 {
1646 	STUB();
1647 }
1648 
1649 struct dma_fence *
1650 dma_fence_get(struct dma_fence *fence)
1651 {
1652 	if (fence)
1653 		kref_get(&fence->refcount);
1654 	return fence;
1655 }
1656 
1657 struct dma_fence *
1658 dma_fence_get_rcu(struct dma_fence *fence)
1659 {
1660 	if (fence)
1661 		kref_get(&fence->refcount);
1662 	return fence;
1663 }
1664 
1665 struct dma_fence *
1666 dma_fence_get_rcu_safe(struct dma_fence **dfp)
1667 {
1668 	struct dma_fence *fence;
1669 	if (dfp == NULL)
1670 		return NULL;
1671 	fence = *dfp;
1672 	if (fence)
1673 		kref_get(&fence->refcount);
1674 	return fence;
1675 }
1676 
1677 void
1678 dma_fence_release(struct kref *ref)
1679 {
1680 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
1681 	if (fence->ops && fence->ops->release)
1682 		fence->ops->release(fence);
1683 	else
1684 		free(fence, M_DRM, 0);
1685 }
1686 
1687 void
1688 dma_fence_put(struct dma_fence *fence)
1689 {
1690 	if (fence)
1691 		kref_put(&fence->refcount, dma_fence_release);
1692 }
1693 
1694 int
1695 dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp)
1696 {
1697 	struct dma_fence_cb *cur, *tmp;
1698 	struct list_head cb_list;
1699 
1700 	if (fence == NULL)
1701 		return -EINVAL;
1702 
1703 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1704 		return -EINVAL;
1705 
1706 	list_replace(&fence->cb_list, &cb_list);
1707 
1708 	fence->timestamp = timestamp;
1709 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
1710 
1711 	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
1712 		INIT_LIST_HEAD(&cur->node);
1713 		cur->func(fence, cur);
1714 	}
1715 
1716 	return 0;
1717 }
1718 
1719 int
1720 dma_fence_signal(struct dma_fence *fence)
1721 {
1722 	int r;
1723 
1724 	if (fence == NULL)
1725 		return -EINVAL;
1726 
1727 	mtx_enter(fence->lock);
1728 	r = dma_fence_signal_timestamp_locked(fence, ktime_get());
1729 	mtx_leave(fence->lock);
1730 
1731 	return r;
1732 }
1733 
1734 int
1735 dma_fence_signal_locked(struct dma_fence *fence)
1736 {
1737 	if (fence == NULL)
1738 		return -EINVAL;
1739 
1740 	return dma_fence_signal_timestamp_locked(fence, ktime_get());
1741 }
1742 
1743 int
1744 dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
1745 {
1746 	int r;
1747 
1748 	if (fence == NULL)
1749 		return -EINVAL;
1750 
1751 	mtx_enter(fence->lock);
1752 	r = dma_fence_signal_timestamp_locked(fence, timestamp);
1753 	mtx_leave(fence->lock);
1754 
1755 	return r;
1756 }
1757 
1758 bool
1759 dma_fence_is_signaled(struct dma_fence *fence)
1760 {
1761 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1762 		return true;
1763 
1764 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1765 		dma_fence_signal(fence);
1766 		return true;
1767 	}
1768 
1769 	return false;
1770 }
1771 
1772 bool
1773 dma_fence_is_signaled_locked(struct dma_fence *fence)
1774 {
1775 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1776 		return true;
1777 
1778 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1779 		dma_fence_signal_locked(fence);
1780 		return true;
1781 	}
1782 
1783 	return false;
1784 }
1785 
1786 ktime_t
1787 dma_fence_timestamp(struct dma_fence *fence)
1788 {
1789 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1790 		while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
1791 			CPU_BUSY_CYCLE();
1792 		return fence->timestamp;
1793 	} else {
1794 		return ktime_get();
1795 	}
1796 }
1797 
1798 long
1799 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
1800 {
1801 	if (timeout < 0)
1802 		return -EINVAL;
1803 
1804 	if (fence->ops->wait)
1805 		return fence->ops->wait(fence, intr, timeout);
1806 	else
1807 		return dma_fence_default_wait(fence, intr, timeout);
1808 }
1809 
1810 long
1811 dma_fence_wait(struct dma_fence *fence, bool intr)
1812 {
1813 	long ret;
1814 
1815 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
1816 	if (ret < 0)
1817 		return ret;
1818 
1819 	return 0;
1820 }
1821 
1822 void
1823 dma_fence_enable_sw_signaling(struct dma_fence *fence)
1824 {
1825 	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
1826 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
1827 	    fence->ops->enable_signaling) {
1828 		mtx_enter(fence->lock);
1829 		if (!fence->ops->enable_signaling(fence))
1830 			dma_fence_signal_locked(fence);
1831 		mtx_leave(fence->lock);
1832 	}
1833 }
1834 
1835 void
1836 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
1837     struct mutex *lock, uint64_t context, uint64_t seqno)
1838 {
1839 	fence->ops = ops;
1840 	fence->lock = lock;
1841 	fence->context = context;
1842 	fence->seqno = seqno;
1843 	fence->flags = 0;
1844 	fence->error = 0;
1845 	kref_init(&fence->refcount);
1846 	INIT_LIST_HEAD(&fence->cb_list);
1847 }
1848 
1849 int
1850 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
1851     dma_fence_func_t func)
1852 {
1853 	int ret = 0;
1854 	bool was_set;
1855 
1856 	if (WARN_ON(!fence || !func))
1857 		return -EINVAL;
1858 
1859 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1860 		INIT_LIST_HEAD(&cb->node);
1861 		return -ENOENT;
1862 	}
1863 
1864 	mtx_enter(fence->lock);
1865 
1866 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
1867 
1868 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1869 		ret = -ENOENT;
1870 	else if (!was_set && fence->ops->enable_signaling) {
1871 		if (!fence->ops->enable_signaling(fence)) {
1872 			dma_fence_signal_locked(fence);
1873 			ret = -ENOENT;
1874 		}
1875 	}
1876 
1877 	if (!ret) {
1878 		cb->func = func;
1879 		list_add_tail(&cb->node, &fence->cb_list);
1880 	} else
1881 		INIT_LIST_HEAD(&cb->node);
1882 	mtx_leave(fence->lock);
1883 
1884 	return ret;
1885 }
1886 
1887 bool
1888 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
1889 {
1890 	bool ret;
1891 
1892 	mtx_enter(fence->lock);
1893 
1894 	ret = !list_empty(&cb->node);
1895 	if (ret)
1896 		list_del_init(&cb->node);
1897 
1898 	mtx_leave(fence->lock);
1899 
1900 	return ret;
1901 }
1902 
1903 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1904 
1905 uint64_t
1906 dma_fence_context_alloc(unsigned int num)
1907 {
1908 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1909 }
1910 
1911 struct default_wait_cb {
1912 	struct dma_fence_cb base;
1913 	struct proc *proc;
1914 };
1915 
1916 static void
1917 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1918 {
1919 	struct default_wait_cb *wait =
1920 	    container_of(cb, struct default_wait_cb, base);
1921 	wake_up_process(wait->proc);
1922 }
1923 
1924 long
1925 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1926 {
1927 	long ret = timeout ? timeout : 1;
1928 	unsigned long end;
1929 	int err;
1930 	struct default_wait_cb cb;
1931 	bool was_set;
1932 
1933 	KASSERT(timeout <= INT_MAX);
1934 
1935 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1936 		return ret;
1937 
1938 	mtx_enter(fence->lock);
1939 
1940 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1941 	    &fence->flags);
1942 
1943 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1944 		goto out;
1945 
1946 	if (!was_set && fence->ops->enable_signaling) {
1947 		if (!fence->ops->enable_signaling(fence)) {
1948 			dma_fence_signal_locked(fence);
1949 			goto out;
1950 		}
1951 	}
1952 
1953 	if (timeout == 0) {
1954 		ret = 0;
1955 		goto out;
1956 	}
1957 
1958 	cb.base.func = dma_fence_default_wait_cb;
1959 	cb.proc = curproc;
1960 	list_add(&cb.base.node, &fence->cb_list);
1961 
1962 	end = jiffies + timeout;
1963 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1964 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1965 			break;
1966 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1967 		    "dmafence", ret);
1968 		if (err == EINTR || err == ERESTART) {
1969 			ret = -ERESTARTSYS;
1970 			break;
1971 		}
1972 	}
1973 
1974 	if (!list_empty(&cb.base.node))
1975 		list_del(&cb.base.node);
1976 out:
1977 	mtx_leave(fence->lock);
1978 
1979 	return ret;
1980 }
1981 
1982 static bool
1983 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
1984     uint32_t *idx)
1985 {
1986 	int i;
1987 
1988 	for (i = 0; i < count; ++i) {
1989 		struct dma_fence *fence = fences[i];
1990 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1991 			if (idx)
1992 				*idx = i;
1993 			return true;
1994 		}
1995 	}
1996 	return false;
1997 }
1998 
1999 long
2000 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
2001     bool intr, long timeout, uint32_t *idx)
2002 {
2003 	struct default_wait_cb *cb;
2004 	long ret = timeout;
2005 	unsigned long end;
2006 	int i, err;
2007 
2008 	KASSERT(timeout <= INT_MAX);
2009 
2010 	if (timeout == 0) {
2011 		for (i = 0; i < count; i++) {
2012 			if (dma_fence_is_signaled(fences[i])) {
2013 				if (idx)
2014 					*idx = i;
2015 				return 1;
2016 			}
2017 		}
2018 		return 0;
2019 	}
2020 
2021 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2022 	if (cb == NULL)
2023 		return -ENOMEM;
2024 
2025 	for (i = 0; i < count; i++) {
2026 		struct dma_fence *fence = fences[i];
2027 		cb[i].proc = curproc;
2028 		if (dma_fence_add_callback(fence, &cb[i].base,
2029 		    dma_fence_default_wait_cb)) {
2030 			if (idx)
2031 				*idx = i;
2032 			goto cb_cleanup;
2033 		}
2034 	}
2035 
2036 	end = jiffies + timeout;
2037 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
2038 		if (dma_fence_test_signaled_any(fences, count, idx))
2039 			break;
2040 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
2041 		if (err == EINTR || err == ERESTART) {
2042 			ret = -ERESTARTSYS;
2043 			break;
2044 		}
2045 	}
2046 
2047 cb_cleanup:
2048 	while (i-- > 0)
2049 		dma_fence_remove_callback(fences[i], &cb[i].base);
2050 	free(cb, M_DRM, count * sizeof(*cb));
2051 	return ret;
2052 }
2053 
2054 void
2055 dma_fence_set_deadline(struct dma_fence *f, ktime_t t)
2056 {
2057 	if (f->ops->set_deadline == NULL)
2058 		return;
2059 	if (dma_fence_is_signaled(f) == false)
2060 		f->ops->set_deadline(f, t);
2061 }
2062 
2063 static struct dma_fence dma_fence_stub;
2064 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
2065 
2066 static const char *
2067 dma_fence_stub_get_name(struct dma_fence *fence)
2068 {
2069 	return "stub";
2070 }
2071 
2072 static const struct dma_fence_ops dma_fence_stub_ops = {
2073 	.get_driver_name = dma_fence_stub_get_name,
2074 	.get_timeline_name = dma_fence_stub_get_name,
2075 };
2076 
2077 struct dma_fence *
2078 dma_fence_get_stub(void)
2079 {
2080 	mtx_enter(&dma_fence_stub_mtx);
2081 	if (dma_fence_stub.ops == NULL) {
2082 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
2083 		    &dma_fence_stub_mtx, 0, 0);
2084 		dma_fence_signal_locked(&dma_fence_stub);
2085 	}
2086 	mtx_leave(&dma_fence_stub_mtx);
2087 
2088 	return dma_fence_get(&dma_fence_stub);
2089 }
2090 
2091 struct dma_fence *
2092 dma_fence_allocate_private_stub(ktime_t ts)
2093 {
2094 	struct dma_fence *f = malloc(sizeof(*f), M_DRM,
2095 	    M_ZERO | M_WAITOK | M_CANFAIL);
2096 	if (f == NULL)
2097 		return NULL;
2098 	dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
2099 	dma_fence_signal_timestamp(f, ts);
2100 	return f;
2101 }
2102 
2103 static const char *
2104 dma_fence_array_get_driver_name(struct dma_fence *fence)
2105 {
2106 	return "dma_fence_array";
2107 }
2108 
2109 static const char *
2110 dma_fence_array_get_timeline_name(struct dma_fence *fence)
2111 {
2112 	return "unbound";
2113 }
2114 
2115 static void
2116 irq_dma_fence_array_work(void *arg)
2117 {
2118 	struct dma_fence_array *dfa = (struct dma_fence_array *)arg;
2119 	dma_fence_signal(&dfa->base);
2120 	dma_fence_put(&dfa->base);
2121 }
2122 
2123 static void
2124 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
2125 {
2126 	struct dma_fence_array_cb *array_cb =
2127 	    container_of(cb, struct dma_fence_array_cb, cb);
2128 	struct dma_fence_array *dfa = array_cb->array;
2129 
2130 	if (atomic_dec_and_test(&dfa->num_pending))
2131 		timeout_add(&dfa->to, 1);
2132 	else
2133 		dma_fence_put(&dfa->base);
2134 }
2135 
2136 static bool
2137 dma_fence_array_enable_signaling(struct dma_fence *fence)
2138 {
2139 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2140 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
2141 	int i;
2142 
2143 	for (i = 0; i < dfa->num_fences; ++i) {
2144 		cb[i].array = dfa;
2145 		dma_fence_get(&dfa->base);
2146 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
2147 		    dma_fence_array_cb_func)) {
2148 			dma_fence_put(&dfa->base);
2149 			if (atomic_dec_and_test(&dfa->num_pending))
2150 				return false;
2151 		}
2152 	}
2153 
2154 	return true;
2155 }
2156 
2157 static bool
2158 dma_fence_array_signaled(struct dma_fence *fence)
2159 {
2160 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2161 
2162 	return atomic_read(&dfa->num_pending) <= 0;
2163 }
2164 
2165 static void
2166 dma_fence_array_release(struct dma_fence *fence)
2167 {
2168 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2169 	int i;
2170 
2171 	for (i = 0; i < dfa->num_fences; ++i)
2172 		dma_fence_put(dfa->fences[i]);
2173 
2174 	free(dfa->fences, M_DRM, 0);
2175 	dma_fence_free(fence);
2176 }
2177 
2178 struct dma_fence_array *
2179 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
2180     unsigned seqno, bool signal_on_any)
2181 {
2182 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
2183 	    (num_fences * sizeof(struct dma_fence_array_cb)),
2184 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2185 	if (dfa == NULL)
2186 		return NULL;
2187 
2188 	mtx_init(&dfa->lock, IPL_TTY);
2189 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
2190 	    context, seqno);
2191 	timeout_set(&dfa->to, irq_dma_fence_array_work, dfa);
2192 
2193 	dfa->num_fences = num_fences;
2194 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
2195 	dfa->fences = fences;
2196 
2197 	return dfa;
2198 }
2199 
2200 struct dma_fence *
2201 dma_fence_array_first(struct dma_fence *f)
2202 {
2203 	struct dma_fence_array *dfa;
2204 
2205 	if (f == NULL)
2206 		return NULL;
2207 
2208 	if ((dfa = to_dma_fence_array(f)) == NULL)
2209 		return f;
2210 
2211 	if (dfa->num_fences > 0)
2212 		return dfa->fences[0];
2213 
2214 	return NULL;
2215 }
2216 
2217 struct dma_fence *
2218 dma_fence_array_next(struct dma_fence *f, unsigned int i)
2219 {
2220 	struct dma_fence_array *dfa;
2221 
2222 	if (f == NULL)
2223 		return NULL;
2224 
2225 	if ((dfa = to_dma_fence_array(f)) == NULL)
2226 		return NULL;
2227 
2228 	if (i < dfa->num_fences)
2229 		return dfa->fences[i];
2230 
2231 	return NULL;
2232 }
2233 
2234 const struct dma_fence_ops dma_fence_array_ops = {
2235 	.get_driver_name = dma_fence_array_get_driver_name,
2236 	.get_timeline_name = dma_fence_array_get_timeline_name,
2237 	.enable_signaling = dma_fence_array_enable_signaling,
2238 	.signaled = dma_fence_array_signaled,
2239 	.release = dma_fence_array_release,
2240 };
2241 
2242 int
2243 dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
2244 {
2245 	struct dma_fence_chain *chain;
2246 	struct dma_fence *fence;
2247 
2248 	if (seqno == 0)
2249 		return 0;
2250 
2251 	if ((chain = to_dma_fence_chain(*df)) == NULL)
2252 		return -EINVAL;
2253 
2254 	fence = &chain->base;
2255 	if (fence->seqno < seqno)
2256 		return -EINVAL;
2257 
2258 	dma_fence_chain_for_each(*df, fence) {
2259 		if ((*df)->context != fence->context)
2260 			break;
2261 
2262 		chain = to_dma_fence_chain(*df);
2263 		if (chain->prev_seqno < seqno)
2264 			break;
2265 	}
2266 	dma_fence_put(fence);
2267 
2268 	return 0;
2269 }
2270 
2271 void
2272 dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
2273     struct dma_fence *fence, uint64_t seqno)
2274 {
2275 	uint64_t context;
2276 
2277 	chain->fence = fence;
2278 	chain->prev = prev;
2279 	mtx_init(&chain->lock, IPL_TTY);
2280 
2281 	/* if prev is a chain */
2282 	if (to_dma_fence_chain(prev) != NULL) {
2283 		if (__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
2284 			chain->prev_seqno = prev->seqno;
2285 			context = prev->context;
2286 		} else {
2287 			chain->prev_seqno = 0;
2288 			context = dma_fence_context_alloc(1);
2289 			seqno = prev->seqno;
2290 		}
2291 	} else {
2292 		chain->prev_seqno = 0;
2293 		context = dma_fence_context_alloc(1);
2294 	}
2295 
2296 	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
2297 	    context, seqno);
2298 }
2299 
2300 static const char *
2301 dma_fence_chain_get_driver_name(struct dma_fence *fence)
2302 {
2303 	return "dma_fence_chain";
2304 }
2305 
2306 static const char *
2307 dma_fence_chain_get_timeline_name(struct dma_fence *fence)
2308 {
2309 	return "unbound";
2310 }
2311 
2312 static bool dma_fence_chain_enable_signaling(struct dma_fence *);
2313 
2314 static void
2315 dma_fence_chain_timo(void *arg)
2316 {
2317 	struct dma_fence_chain *chain = (struct dma_fence_chain *)arg;
2318 
2319 	if (dma_fence_chain_enable_signaling(&chain->base) == false)
2320 		dma_fence_signal(&chain->base);
2321 	dma_fence_put(&chain->base);
2322 }
2323 
2324 static void
2325 dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
2326 {
2327 	struct dma_fence_chain *chain =
2328 	    container_of(cb, struct dma_fence_chain, cb);
2329 	timeout_set(&chain->to, dma_fence_chain_timo, chain);
2330 	timeout_add(&chain->to, 1);
2331 	dma_fence_put(f);
2332 }
2333 
2334 static bool
2335 dma_fence_chain_enable_signaling(struct dma_fence *fence)
2336 {
2337 	struct dma_fence_chain *chain, *h;
2338 	struct dma_fence *f;
2339 
2340 	h = to_dma_fence_chain(fence);
2341 	dma_fence_get(&h->base);
2342 	dma_fence_chain_for_each(fence, &h->base) {
2343 		chain = to_dma_fence_chain(fence);
2344 		if (chain == NULL)
2345 			f = fence;
2346 		else
2347 			f = chain->fence;
2348 
2349 		dma_fence_get(f);
2350 		if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) {
2351 			dma_fence_put(fence);
2352 			return true;
2353 		}
2354 		dma_fence_put(f);
2355 	}
2356 	dma_fence_put(&h->base);
2357 	return false;
2358 }
2359 
2360 static bool
2361 dma_fence_chain_signaled(struct dma_fence *fence)
2362 {
2363 	struct dma_fence_chain *chain;
2364 	struct dma_fence *f;
2365 
2366 	dma_fence_chain_for_each(fence, fence) {
2367 		chain = to_dma_fence_chain(fence);
2368 		if (chain == NULL)
2369 			f = fence;
2370 		else
2371 			f = chain->fence;
2372 
2373 		if (dma_fence_is_signaled(f) == false) {
2374 			dma_fence_put(fence);
2375 			return false;
2376 		}
2377 	}
2378 	return true;
2379 }
2380 
2381 static void
2382 dma_fence_chain_release(struct dma_fence *fence)
2383 {
2384 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
2385 	struct dma_fence_chain *prev_chain;
2386 	struct dma_fence *prev;
2387 
2388 	for (prev = chain->prev; prev != NULL; prev = chain->prev) {
2389 		if (kref_read(&prev->refcount) > 1)
2390 			break;
2391 		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
2392 			break;
2393 		chain->prev = prev_chain->prev;
2394 		prev_chain->prev = NULL;
2395 		dma_fence_put(prev);
2396 	}
2397 	dma_fence_put(prev);
2398 	dma_fence_put(chain->fence);
2399 	dma_fence_free(fence);
2400 }
2401 
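/*
 * Step to the previous fence in a chain, opportunistically unlinking
 * links whose fences have already signalled by swinging chain->prev
 * past them with an atomic CAS.  Consumes the reference on the fence
 * passed in and returns a new reference to the previous fence, or
 * NULL at the start of the chain.
 */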
2402 struct dma_fence *
2403 dma_fence_chain_walk(struct dma_fence *fence)
2404 {
2405 	struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain;
2406 	struct dma_fence *prev, *new_prev, *tmp;
2407 
2408 	if (chain == NULL) {
2409 		dma_fence_put(fence);
2410 		return NULL;
2411 	}
2412 
2413 	while ((prev = dma_fence_get(chain->prev)) != NULL) {
2414 		prev_chain = to_dma_fence_chain(prev);
2415 		if (prev_chain != NULL) {
2416 			if (!dma_fence_is_signaled(prev_chain->fence))
2417 				break;
2418 			new_prev = dma_fence_get(prev_chain->prev);
2419 		} else {
2420 			if (!dma_fence_is_signaled(prev))
2421 				break;
2422 			new_prev = NULL;
2423 		}
2424 		tmp = atomic_cas_ptr(&chain->prev, prev, new_prev);
2425 		dma_fence_put(tmp == prev ? prev : new_prev);
2426 		dma_fence_put(prev);
2427 	}
2428 
2429 	dma_fence_put(fence);
2430 	return prev;
2431 }
2432 
2433 const struct dma_fence_ops dma_fence_chain_ops = {
2434 	.get_driver_name = dma_fence_chain_get_driver_name,
2435 	.get_timeline_name = dma_fence_chain_get_timeline_name,
2436 	.enable_signaling = dma_fence_chain_enable_signaling,
2437 	.signaled = dma_fence_chain_signaled,
2438 	.release = dma_fence_chain_release,
2439 	.use_64bit_seqno = true,
2440 };
2441 
2442 bool
2443 dma_fence_is_container(struct dma_fence *fence)
2444 {
2445 	return (fence->ops == &dma_fence_chain_ops) ||
2446 	    (fence->ops == &dma_fence_array_ops);
2447 }
2448 
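/*
 * File operations backing the descriptor handed out for an exported
 * dma-buf.  Only stat, close and a restricted seek do anything;
 * read/write/ioctl/kqfilter simply fail.
 */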
2449 int
2450 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
2451 {
2452 	return (ENXIO);
2453 }
2454 
2455 int
2456 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
2457 {
2458 	return (ENXIO);
2459 }
2460 
2461 int
2462 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2463 {
2464 	return (ENOTTY);
2465 }
2466 
2467 int
2468 dmabuf_kqfilter(struct file *fp, struct knote *kn)
2469 {
2470 	return (EINVAL);
2471 }
2472 
2473 int
2474 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
2475 {
2476 	struct dma_buf *dmabuf = fp->f_data;
2477 
2478 	memset(st, 0, sizeof(*st));
2479 	st->st_size = dmabuf->size;
2480 	st->st_mode = S_IFIFO;	/* XXX */
2481 	return (0);
2482 }
2483 
2484 int
2485 dmabuf_close(struct file *fp, struct proc *p)
2486 {
2487 	struct dma_buf *dmabuf = fp->f_data;
2488 
2489 	fp->f_data = NULL;
2490 	KERNEL_LOCK();
2491 	dmabuf->ops->release(dmabuf);
2492 	KERNEL_UNLOCK();
2493 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
2494 	return (0);
2495 }
2496 
2497 int
2498 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2499 {
2500 	struct dma_buf *dmabuf = fp->f_data;
2501 	off_t newoff;
2502 
2503 	if (*offset != 0)
2504 		return (EINVAL);
2505 
2506 	switch (whence) {
2507 	case SEEK_SET:
2508 		newoff = 0;
2509 		break;
2510 	case SEEK_END:
2511 		newoff = dmabuf->size;
2512 		break;
2513 	default:
2514 		return (EINVAL);
2515 	}
2516 	mtx_enter(&fp->f_mtx);
2517 	fp->f_offset = newoff;
2518 	mtx_leave(&fp->f_mtx);
2519 	*offset = newoff;
2520 	return (0);
2521 }
2522 
2523 const struct fileops dmabufops = {
2524 	.fo_read	= dmabuf_read,
2525 	.fo_write	= dmabuf_write,
2526 	.fo_ioctl	= dmabuf_ioctl,
2527 	.fo_kqfilter	= dmabuf_kqfilter,
2528 	.fo_stat	= dmabuf_stat,
2529 	.fo_close	= dmabuf_close,
2530 	.fo_seek	= dmabuf_seek,
2531 };
2532 
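/*
 * Wrap an exporter's buffer in an OpenBSD file using dmabufops.
 * A rough usage sketch, with my_dmabuf_ops standing in for a real
 * exporter's dma_buf_ops:
 *
 *	struct dma_buf_export_info info = {
 *		.ops = &my_dmabuf_ops,
 *		.size = size,
 *		.priv = obj,
 *	};
 *	struct dma_buf *dmabuf = dma_buf_export(&info);
 *	int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */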
2533 struct dma_buf *
2534 dma_buf_export(const struct dma_buf_export_info *info)
2535 {
2536 	struct proc *p = curproc;
2537 	struct dma_buf *dmabuf;
2538 	struct file *fp;
2539 
2540 	fp = fnew(p);
2541 	if (fp == NULL)
2542 		return ERR_PTR(-ENFILE);
2543 	fp->f_type = DTYPE_DMABUF;
2544 	fp->f_ops = &dmabufops;
2545 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
2546 	dmabuf->priv = info->priv;
2547 	dmabuf->ops = info->ops;
2548 	dmabuf->size = info->size;
2549 	dmabuf->file = fp;
2550 	fp->f_data = dmabuf;
2551 	INIT_LIST_HEAD(&dmabuf->attachments);
2552 	return dmabuf;
2553 }
2554 
2555 struct dma_buf *
2556 dma_buf_get(int fd)
2557 {
2558 	struct proc *p = curproc;
2559 	struct filedesc *fdp = p->p_fd;
2560 	struct file *fp;
2561 
2562 	if ((fp = fd_getfile(fdp, fd)) == NULL)
2563 		return ERR_PTR(-EBADF);
2564 
2565 	if (fp->f_type != DTYPE_DMABUF) {
2566 		FRELE(fp, p);
2567 		return ERR_PTR(-EINVAL);
2568 	}
2569 
2570 	return fp->f_data;
2571 }
2572 
2573 void
2574 dma_buf_put(struct dma_buf *dmabuf)
2575 {
2576 	KASSERT(dmabuf);
2577 	KASSERT(dmabuf->file);
2578 
2579 	FRELE(dmabuf->file, curproc);
2580 }
2581 
2582 int
2583 dma_buf_fd(struct dma_buf *dmabuf, int flags)
2584 {
2585 	struct proc *p = curproc;
2586 	struct filedesc *fdp = p->p_fd;
2587 	struct file *fp = dmabuf->file;
2588 	int fd, cloexec, error;
2589 
2590 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
2591 
2592 	fdplock(fdp);
2593 restart:
2594 	if ((error = fdalloc(p, 0, &fd)) != 0) {
2595 		if (error == ENOSPC) {
2596 			fdexpand(p);
2597 			goto restart;
2598 		}
2599 		fdpunlock(fdp);
2600 		return -error;
2601 	}
2602 
2603 	fdinsert(fdp, fd, cloexec, fp);
2604 	fdpunlock(fdp);
2605 
2606 	return fd;
2607 }
2608 
2609 void
2610 get_dma_buf(struct dma_buf *dmabuf)
2611 {
2612 	FREF(dmabuf->file);
2613 }
2614 
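/*
 * Determine the fastest link speed a device supports from the PCIe
 * Link Capabilities register, preferring the supported link speeds
 * vector in Link Capabilities 2 when the capability version is >= 2.
 */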
2615 enum pci_bus_speed
2616 pcie_get_speed_cap(struct pci_dev *pdev)
2617 {
2618 	pci_chipset_tag_t	pc;
2619 	pcitag_t		tag;
2620 	int			pos;
2621 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
2622 	pcireg_t		id;
2623 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
2624 	int			bus, device, function;
2625 
2626 	if (pdev == NULL)
2627 		return PCI_SPEED_UNKNOWN;
2628 
2629 	pc = pdev->pc;
2630 	tag = pdev->tag;
2631 
2632 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2633 	    &pos, NULL))
2634 		return PCI_SPEED_UNKNOWN;
2635 
2636 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2637 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2638 
2639 	/* we've been informed VIA and ServerWorks chipsets don't make the cut */
2640 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
2641 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
2642 		return PCI_SPEED_UNKNOWN;
2643 
2644 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2645 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
2646 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
2647 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
2648 
2649 	lnkcap &= 0x0f;
2650 	lnkcap2 &= 0xfe;
2651 
2652 	if (lnkcap2) { /* PCIE GEN 3.0 */
2653 		if (lnkcap2 & 0x02)
2654 			cap = PCIE_SPEED_2_5GT;
2655 		if (lnkcap2 & 0x04)
2656 			cap = PCIE_SPEED_5_0GT;
2657 		if (lnkcap2 & 0x08)
2658 			cap = PCIE_SPEED_8_0GT;
2659 		if (lnkcap2 & 0x10)
2660 			cap = PCIE_SPEED_16_0GT;
2661 		if (lnkcap2 & 0x20)
2662 			cap = PCIE_SPEED_32_0GT;
2663 		if (lnkcap2 & 0x40)
2664 			cap = PCIE_SPEED_64_0GT;
2665 	} else {
2666 		if (lnkcap & 0x01)
2667 			cap = PCIE_SPEED_2_5GT;
2668 		if (lnkcap & 0x02)
2669 			cap = PCIE_SPEED_5_0GT;
2670 	}
2671 
2672 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
2673 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
2674 	    lnkcap2);
2675 	return cap;
2676 }
2677 
2678 enum pcie_link_width
2679 pcie_get_width_cap(struct pci_dev *pdev)
2680 {
2681 	pci_chipset_tag_t	pc = pdev->pc;
2682 	pcitag_t		tag = pdev->tag;
2683 	int			pos;
2684 	pcireg_t		lnkcap = 0;
2685 	pcireg_t		id;
2686 	int			bus, device, function;
2687 
2688 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2689 	    &pos, NULL))
2690 		return PCIE_LNK_WIDTH_UNKNOWN;
2691 
2692 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2693 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2694 
2695 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2696 
2697 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
2698 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
2699 
2700 	if (lnkcap)
2701 		return (lnkcap & 0x3f0) >> 4;
2702 	return PCIE_LNK_WIDTH_UNKNOWN;
2703 }
2704 
2705 bool
2706 pcie_aspm_enabled(struct pci_dev *pdev)
2707 {
2708 	pci_chipset_tag_t	pc = pdev->pc;
2709 	pcitag_t		tag = pdev->tag;
2710 	int			pos;
2711 	pcireg_t		lcsr;
2712 
2713 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2714 	    &pos, NULL))
2715 		return false;
2716 
2717 	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
2718 	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
2719 		return true;
2720 
2721 	return false;
2722 }
2723 
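/*
 * wait_on_bit() and friends are emulated with one global mutex and a
 * wakeup channel keyed on the word's address, which is why
 * wake_up_bit() can ignore the bit number.  Typical waker pattern
 * (MY_BIT and flags are placeholder names):
 *
 *	clear_bit(MY_BIT, &flags);
 *	wake_up_bit(&flags, MY_BIT);
 *
 * or simply clear_and_wake_up_bit(MY_BIT, &flags).
 */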
2724 static wait_queue_head_t bit_waitq;
2725 wait_queue_head_t var_waitq;
2726 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
2727 
2728 int
2729 wait_on_bit(unsigned long *word, int bit, unsigned mode)
2730 {
2731 	int err;
2732 
2733 	if (!test_bit(bit, word))
2734 		return 0;
2735 
2736 	mtx_enter(&wait_bit_mtx);
2737 	while (test_bit(bit, word)) {
2738 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
2739 		    INFSLP);
2740 		if (err) {
2741 			mtx_leave(&wait_bit_mtx);
2742 			return 1;
2743 		}
2744 	}
2745 	mtx_leave(&wait_bit_mtx);
2746 	return 0;
2747 }
2748 
2749 int
2750 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
2751 {
2752 	int err;
2753 
2754 	if (!test_bit(bit, word))
2755 		return 0;
2756 
2757 	mtx_enter(&wait_bit_mtx);
2758 	while (test_bit(bit, word)) {
2759 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
2760 		if (err) {
2761 			mtx_leave(&wait_bit_mtx);
2762 			return 1;
2763 		}
2764 	}
2765 	mtx_leave(&wait_bit_mtx);
2766 	return 0;
2767 }
2768 
2769 void
2770 wake_up_bit(void *word, int bit)
2771 {
2772 	mtx_enter(&wait_bit_mtx);
2773 	wakeup(word);
2774 	mtx_leave(&wait_bit_mtx);
2775 }
2776 
2777 void
2778 clear_and_wake_up_bit(int bit, void *word)
2779 {
2780 	clear_bit(bit, word);
2781 	wake_up_bit(word, bit);
2782 }
2783 
2784 wait_queue_head_t *
2785 bit_waitqueue(void *word, int bit)
2786 {
2787 	/* XXX hash table of wait queues? */
2788 	return &bit_waitq;
2789 }
2790 
2791 wait_queue_head_t *
2792 __var_waitqueue(void *p)
2793 {
2794 	/* XXX hash table of wait queues? */
2795 	return &bit_waitq;
2796 }
2797 
2798 struct workqueue_struct *system_wq;
2799 struct workqueue_struct *system_highpri_wq;
2800 struct workqueue_struct *system_unbound_wq;
2801 struct workqueue_struct *system_long_wq;
2802 struct taskq *taskletq;
2803 
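/*
 * Set up the Linux compatibility glue: the shared workqueues are
 * backed by taskqs, tasklets get a single-threaded taskq of their
 * own, and the idr pool and kmap_atomic VA are allocated up front.
 */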
2804 void
2805 drm_linux_init(void)
2806 {
2807 	system_wq = (struct workqueue_struct *)
2808 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
2809 	system_highpri_wq = (struct workqueue_struct *)
2810 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2811 	system_unbound_wq = (struct workqueue_struct *)
2812 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2813 	system_long_wq = (struct workqueue_struct *)
2814 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2815 
2816 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2817 
2818 	init_waitqueue_head(&bit_waitq);
2819 	init_waitqueue_head(&var_waitq);
2820 
2821 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
2822 	    "idrpl", NULL);
2823 
2824 	kmap_atomic_va =
2825 	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
2826 }
2827 
2828 void
2829 drm_linux_exit(void)
2830 {
2831 	pool_destroy(&idr_pool);
2832 
2833 	taskq_destroy(taskletq);
2834 
2835 	taskq_destroy((struct taskq *)system_long_wq);
2836 	taskq_destroy((struct taskq *)system_unbound_wq);
2837 	taskq_destroy((struct taskq *)system_highpri_wq);
2838 	taskq_destroy((struct taskq *)system_wq);
2839 }
2840 
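/* register layout of the PCIe Resizable BAR extended capability */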
2841 #define PCIE_ECAP_RESIZE_BAR	0x15
2842 #define RBCAP0			0x04
2843 #define RBCTRL0			0x08
2844 #define RBCTRL_BARINDEX_MASK	0x07
2845 #define RBCTRL_BARSIZE_MASK	0x1f00
2846 #define RBCTRL_BARSIZE_SHIFT	8
2847 
2848 /* resize a BAR: the new size in MB is 1 << nsize */
2849 int
2850 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2851 {
2852 	pcireg_t	reg;
2853 	uint32_t	offset, capid;
2854 
2855 	KASSERT(bar == 0);
2856 
2857 	offset = PCI_PCIE_ECAP;
2858 
2859 	/* search PCI Express Extended Capabilities */
2860 	do {
2861 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2862 		capid = PCI_PCIE_ECAP_ID(reg);
2863 		if (capid == PCIE_ECAP_RESIZE_BAR)
2864 			break;
2865 		offset = PCI_PCIE_ECAP_NEXT(reg);
2866 	} while (capid != 0);
2867 
2868 	if (capid == 0) {
2869 		printf("%s: could not find resize bar cap!\n", __func__);
2870 		return -ENOTSUP;
2871 	}
2872 
2873 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2874 
2875 	if ((reg & (1 << (nsize + 4))) == 0) {
2876 		printf("%s: size not supported\n", __func__);
2877 		return -ENOTSUP;
2878 	}
2879 
2880 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2881 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2882 		printf("%s: BAR index not 0\n", __func__);
2883 		return -EINVAL;
2884 	}
2885 
2886 	reg &= ~RBCTRL_BARSIZE_MASK;
2887 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2888 
2889 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2890 
2891 	return 0;
2892 }
2893 
2894 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2895 
2896 int
2897 register_shrinker(struct shrinker *shrinker, const char *format, ...)
2898 {
2899 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2900 	return 0;
2901 }
2902 
2903 void
2904 unregister_shrinker(struct shrinker *shrinker)
2905 {
2906 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2907 }
2908 
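/*
 * Ask the registered shrinkers to release roughly npages pages,
 * stopping early once enough has been given back.
 */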
2909 void
2910 drmbackoff(long npages)
2911 {
2912 	struct shrink_control sc;
2913 	struct shrinker *shrinker;
2914 	u_long ret;
2915 
2916 	shrinker = TAILQ_FIRST(&shrinkers);
2917 	while (shrinker && npages > 0) {
2918 		sc.nr_to_scan = npages;
2919 		ret = shrinker->scan_objects(shrinker, &sc);
2920 		npages -= ret;
2921 		shrinker = TAILQ_NEXT(shrinker, next);
2922 	}
2923 }
2924 
2925 void *
2926 bitmap_zalloc(u_int n, gfp_t flags)
2927 {
2928 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2929 }
2930 
2931 void
2932 bitmap_free(void *p)
2933 {
2934 	kfree(p);
2935 }
2936 
2937 int
2938 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2939 {
2940 	if (atomic_add_unless(v, -1, 1))
2941 		return 0;
2942 
2943 	rw_enter_write(lock);
2944 	if (atomic_dec_return(v) == 0)
2945 		return 1;
2946 	rw_exit_write(lock);
2947 	return 0;
2948 }
2949 
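/*
 * Minimal printk(): strip a leading Linux KERN_* level marker (SOH
 * followed by a digit) and, unless DRMDEBUG is defined, drop
 * KERN_INFO and lower priority messages before calling vprintf().
 */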
2950 int
2951 printk(const char *fmt, ...)
2952 {
2953 	int ret, level;
2954 	va_list ap;
2955 
2956 	if (fmt != NULL && *fmt == '\001') {
2957 		level = fmt[1];
2958 #ifndef DRMDEBUG
2959 		if (level >= KERN_INFO[1] && level <= '9')
2960 			return 0;
2961 #endif
2962 		fmt += 2;
2963 	}
2964 
2965 	va_start(ap, fmt);
2966 	ret = vprintf(fmt, ap);
2967 	va_end(ap);
2968 
2969 	return ret;
2970 }
2971 
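/*
 * Simplified interval tree: nodes live in a regular cached rbtree
 * keyed on start, and lookups are a linear scan instead of the
 * augmented subtree search Linux uses.
 */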
2972 #define START(node) ((node)->start)
2973 #define LAST(node) ((node)->last)
2974 
2975 struct interval_tree_node *
2976 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2977     unsigned long last)
2978 {
2979 	struct interval_tree_node *node;
2980 	struct rb_node *rb;
2981 
2982 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
2983 		node = rb_entry(rb, typeof(*node), rb);
2984 		if (LAST(node) >= start && START(node) <= last)
2985 			return node;
2986 	}
2987 	return NULL;
2988 }
2989 
2990 void
2991 interval_tree_remove(struct interval_tree_node *node,
2992     struct rb_root_cached *root)
2993 {
2994 	rb_erase_cached(&node->rb, root);
2995 }
2996 
2997 void
2998 interval_tree_insert(struct interval_tree_node *node,
2999     struct rb_root_cached *root)
3000 {
3001 	struct rb_node **iter = &root->rb_root.rb_node;
3002 	struct rb_node *parent = NULL;
3003 	struct interval_tree_node *iter_node;
3004 
3005 	while (*iter) {
3006 		parent = *iter;
3007 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
3008 
3009 		if (node->start < iter_node->start)
3010 			iter = &(*iter)->rb_left;
3011 		else
3012 			iter = &(*iter)->rb_right;
3013 	}
3014 
3015 	rb_link_node(&node->rb, parent, iter);
3016 	rb_insert_color_cached(&node->rb, root, false);
3017 }
3018 
3019 int
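/*
 * File operations backing sync_file descriptors used for explicit
 * fencing; as with dma-bufs, only stat, close and seek do anything.
 */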
3020 syncfile_read(struct file *fp, struct uio *uio, int fflags)
3021 {
3022 	return ENXIO;
3023 }
3024 
3025 int
3026 syncfile_write(struct file *fp, struct uio *uio, int fflags)
3027 {
3028 	return ENXIO;
3029 }
3030 
3031 int
3032 syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
3033 {
3034 	return ENOTTY;
3035 }
3036 
3037 int
3038 syncfile_kqfilter(struct file *fp, struct knote *kn)
3039 {
3040 	return EINVAL;
3041 }
3042 
3043 int
3044 syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
3045 {
3046 	memset(st, 0, sizeof(*st));
3047 	st->st_mode = S_IFIFO;	/* XXX */
3048 	return 0;
3049 }
3050 
3051 int
3052 syncfile_close(struct file *fp, struct proc *p)
3053 {
3054 	struct sync_file *sf = fp->f_data;
3055 
3056 	dma_fence_put(sf->fence);
3057 	fp->f_data = NULL;
3058 	free(sf, M_DRM, sizeof(struct sync_file));
3059 	return 0;
3060 }
3061 
3062 int
3063 syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
3064 {
3065 	off_t newoff;
3066 
3067 	if (*offset != 0)
3068 		return EINVAL;
3069 
3070 	switch (whence) {
3071 	case SEEK_SET:
3072 		newoff = 0;
3073 		break;
3074 	case SEEK_END:
3075 		newoff = 0;
3076 		break;
3077 	default:
3078 		return EINVAL;
3079 	}
3080 	mtx_enter(&fp->f_mtx);
3081 	fp->f_offset = newoff;
3082 	mtx_leave(&fp->f_mtx);
3083 	*offset = newoff;
3084 	return 0;
3085 }
3086 
3087 const struct fileops syncfileops = {
3088 	.fo_read	= syncfile_read,
3089 	.fo_write	= syncfile_write,
3090 	.fo_ioctl	= syncfile_ioctl,
3091 	.fo_kqfilter	= syncfile_kqfilter,
3092 	.fo_stat	= syncfile_stat,
3093 	.fo_close	= syncfile_close,
3094 	.fo_seek	= syncfile_seek,
3095 };
3096 
3097 void
3098 fd_install(int fd, struct file *fp)
3099 {
3100 	struct proc *p = curproc;
3101 	struct filedesc *fdp = p->p_fd;
3102 
3103 	if (fp->f_type != DTYPE_SYNC)
3104 		return;
3105 
3106 	fdplock(fdp);
3107 	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
3108 	fdinsert(fdp, fd, UF_EXCLOSE, fp);
3109 	fdpunlock(fdp);
3110 }
3111 
3112 void
3113 fput(struct file *fp)
3114 {
3115 	if (fp->f_type != DTYPE_SYNC)
3116 		return;
3117 
3118 	FRELE(fp, curproc);
3119 }
3120 
3121 int
3122 get_unused_fd_flags(unsigned int flags)
3123 {
3124 	struct proc *p = curproc;
3125 	struct filedesc *fdp = p->p_fd;
3126 	int error, fd;
3127 
3128 	KASSERT((flags & O_CLOEXEC) != 0);
3129 
3130 	fdplock(fdp);
3131 retryalloc:
3132 	if ((error = fdalloc(p, 0, &fd)) != 0) {
3133 		if (error == ENOSPC) {
3134 			fdexpand(p);
3135 			goto retryalloc;
3136 		}
3137 		fdpunlock(fdp);
3138 		return -1;
3139 	}
3140 	fdpunlock(fdp);
3141 
3142 	return fd;
3143 }
3144 
3145 void
3146 put_unused_fd(int fd)
3147 {
3148 	struct filedesc *fdp = curproc->p_fd;
3149 
3150 	fdplock(fdp);
3151 	fdremove(fdp, fd);
3152 	fdpunlock(fdp);
3153 }
3154 
3155 struct dma_fence *
3156 sync_file_get_fence(int fd)
3157 {
3158 	struct proc *p = curproc;
3159 	struct filedesc *fdp = p->p_fd;
3160 	struct file *fp;
3161 	struct sync_file *sf;
3162 	struct dma_fence *f;
3163 
3164 	if ((fp = fd_getfile(fdp, fd)) == NULL)
3165 		return NULL;
3166 
3167 	if (fp->f_type != DTYPE_SYNC) {
3168 		FRELE(fp, p);
3169 		return NULL;
3170 	}
3171 	sf = fp->f_data;
3172 	f = dma_fence_get(sf->fence);
3173 	FRELE(sf->file, p);
3174 	return f;
3175 }
3176 
3177 struct sync_file *
3178 sync_file_create(struct dma_fence *fence)
3179 {
3180 	struct proc *p = curproc;
3181 	struct sync_file *sf;
3182 	struct file *fp;
3183 
3184 	fp = fnew(p);
3185 	if (fp == NULL)
3186 		return NULL;
3187 	fp->f_type = DTYPE_SYNC;
3188 	fp->f_ops = &syncfileops;
3189 	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
3190 	sf->file = fp;
3191 	sf->fence = dma_fence_get(fence);
3192 	fp->f_data = sf;
3193 	return sf;
3194 }
3195 
3196 bool
3197 drm_firmware_drivers_only(void)
3198 {
3199 	return false;
3200 }
3201 
3203 void *
3204 memremap(phys_addr_t phys_addr, size_t size, int flags)
3205 {
3206 	STUB();
3207 	return NULL;
3208 }
3209 
3210 void
3211 memunmap(void *addr)
3212 {
3213 	STUB();
3214 }
3215 
3216 #include <linux/platform_device.h>
3217 
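/*
 * Map a Linux struct device back to the bus_dma tag it should use:
 * a drm device gets its own DMA tag, anything else is assumed to be
 * a platform device and uses the tag recorded there.
 */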
3218 bus_dma_tag_t
3219 dma_tag_lookup(struct device *dev)
3220 {
3221 	extern struct cfdriver drm_cd;
3222 	struct drm_device *drm;
3223 	int i;
3224 
3225 	for (i = 0; i < drm_cd.cd_ndevs; i++) {
3226 		drm = drm_cd.cd_devs[i];
3227 		if (drm && drm->dev == dev)
3228 			return drm->dmat;
3229 	}
3230 
3231 	return ((struct platform_device *)dev)->dmat;
3232 }
3233 
3234 LIST_HEAD(, drm_dmamem) dmamem_list = LIST_HEAD_INITIALIZER(dmamem_list);
3235 
3236 void *
3237 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
3238     int gfp)
3239 {
3240 	bus_dma_tag_t dmat = dma_tag_lookup(dev);
3241 	struct drm_dmamem *mem;
3242 
3243 	mem = drm_dmamem_alloc(dmat, size, PAGE_SIZE, 1, size,
3244 	    BUS_DMA_COHERENT, 0);
3245 	if (mem == NULL)
3246 		return NULL;
3247 	*dma_handle = mem->map->dm_segs[0].ds_addr;
3248 	LIST_INSERT_HEAD(&dmamem_list, mem, next);
3249 	return mem->kva;
3250 }
3251 
3252 void
3253 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
3254     dma_addr_t dma_handle)
3255 {
3256 	bus_dma_tag_t dmat = dma_tag_lookup(dev);
3257 	struct drm_dmamem *mem;
3258 
3259 	LIST_FOREACH(mem, &dmamem_list, next) {
3260 		if (mem->kva == cpu_addr)
3261 			break;
3262 	}
3263 	KASSERT(mem);
3264 	KASSERT(mem->size == size);
3265 	KASSERT(mem->map->dm_segs[0].ds_addr == dma_handle);
3266 
3267 	LIST_REMOVE(mem, next);
3268 	drm_dmamem_free(dmat, mem);
3269 }
3270 
3271 int
3272 dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
3273     dma_addr_t dma_addr, size_t size)
3274 {
3275 	paddr_t pa;
3276 	int ret;
3277 
3278 	if (!pmap_extract(pmap_kernel(), (vaddr_t)cpu_addr, &pa))
3279 		return -EINVAL;
3280 
3281 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
3282 	if (ret)
3283 		return ret;
3284 
3285 	sg_set_page(sgt->sgl, PHYS_TO_VM_PAGE(pa), size, 0);
3286 	return 0;
3287 }
3288 
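/*
 * Map a raw physical region for device access by creating a DMA map
 * and loading the region into it; the bus_dmamap is not recorded
 * anywhere, so it cannot be unloaded again through this interface.
 */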
3289 dma_addr_t
3290 dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
3291     enum dma_data_direction dir, u_long attr)
3292 {
3293 	bus_dma_tag_t dmat = dma_tag_lookup(dev);
3294 	bus_dmamap_t map;
3295 	bus_dma_segment_t seg;
3296 
3297 	if (bus_dmamap_create(dmat, size, 1, size, 0,
3298 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &map))
3299 		return DMA_MAPPING_ERROR;
3300 	seg.ds_addr = phys_addr;
3301 	seg.ds_len = size;
3302 	if (bus_dmamap_load_raw(dmat, map, &seg, 1, size, BUS_DMA_WAITOK)) {
3303 		bus_dmamap_destroy(dmat, map);
3304 		return DMA_MAPPING_ERROR;
3305 	}
3306 
3307 	return map->dm_segs[0].ds_addr;
3308 }
3309 
3310 #ifdef BUS_DMA_FIXED
3311 
3312 #include <linux/iommu.h>
3313 
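/*
 * Minimal iommu shim for single-segment sg tables: the lone segment
 * is loaded at the caller-chosen iova using BUS_DMA_FIXED.
 */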
3314 size_t
3315 iommu_map_sgtable(struct iommu_domain *domain, u_long iova,
3316     struct sg_table *sgt, int prot)
3317 {
3318 	bus_dma_segment_t seg;
3319 	int error;
3320 
3321 	error = bus_dmamap_create(domain->dmat, sgt->sgl->length, 1,
3322 	    sgt->sgl->length, 0, BUS_DMA_WAITOK, &sgt->dmamap);
3323 	if (error)
3324 		return -ENOMEM;
3325 
3326 	sgt->dmamap->dm_segs[0].ds_addr = iova;
3327 	sgt->dmamap->dm_segs[0].ds_len = sgt->sgl->length;
3328 	sgt->dmamap->dm_nsegs = 1;
3329 	seg.ds_addr = VM_PAGE_TO_PHYS(sgt->sgl->__page);
3330 	seg.ds_len = sgt->sgl->length;
3331 	error = bus_dmamap_load_raw(domain->dmat, sgt->dmamap, &seg, 1,
3332 	    sgt->sgl->length, BUS_DMA_WAITOK | BUS_DMA_FIXED);
3333 	if (error)
3334 		return -ENOMEM;
3335 
3336 	return sg_dma_len(sgt->sgl);
3337 }
3338 
3339 size_t
3340 iommu_unmap(struct iommu_domain *domain, u_long iova, size_t size)
3341 {
3342 	STUB();
3343 	return 0;
3344 }
3345 
3346 struct iommu_domain *
3347 iommu_get_domain_for_dev(struct device *dev)
3348 {
3349 	STUB();
3350 	return NULL;
3351 }
3352 
3353 phys_addr_t
3354 iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
3355 {
3356 	STUB();
3357 	return 0;
3358 }
3359 
3360 struct iommu_domain *
3361 iommu_domain_alloc(struct bus_type *type)
3362 {
3363 	return malloc(sizeof(struct iommu_domain), M_DEVBUF, M_WAITOK | M_ZERO);
3364 }
3365 
3366 int
3367 iommu_attach_device(struct iommu_domain *domain, struct device *dev)
3368 {
3369 	struct platform_device *pdev = (struct platform_device *)dev;
3370 
3371 	domain->dmat = pdev->dmat;
3372 	return 0;
3373 }
3374 
3375 #endif
3376 
3377 #include <linux/component.h>
3378 
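/*
 * Tiny stand-in for the Linux component framework: components sit on
 * a global list and are matched against a master in
 * component_master_add_with_match(), which calls the master's bind
 * hook once any component matches.
 */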
3379 struct component {
3380 	struct device *dev;
3381 	struct device *adev;
3382 	const struct component_ops *ops;
3383 	SLIST_ENTRY(component) next;
3384 };
3385 
3386 SLIST_HEAD(,component) component_list = SLIST_HEAD_INITIALIZER(component_list);
3387 
3388 int
3389 component_add(struct device *dev, const struct component_ops *ops)
3390 {
3391 	struct component *component;
3392 
3393 	component = malloc(sizeof(*component), M_DEVBUF, M_WAITOK | M_ZERO);
3394 	component->dev = dev;
3395 	component->ops = ops;
3396 	SLIST_INSERT_HEAD(&component_list, component, next);
3397 	return 0;
3398 }
3399 
3400 int
3401 component_add_typed(struct device *dev, const struct component_ops *ops,
3402 	int type)
3403 {
3404 	return component_add(dev, ops);
3405 }
3406 
3407 int
3408 component_bind_all(struct device *dev, void *data)
3409 {
3410 	struct component *component;
3411 	int ret = 0;
3412 
3413 	SLIST_FOREACH(component, &component_list, next) {
3414 		if (component->adev == dev) {
3415 			ret = component->ops->bind(component->dev, NULL, data);
3416 			if (ret)
3417 				break;
3418 		}
3419 	}
3420 
3421 	return ret;
3422 }
3423 
3424 struct component_match_entry {
3425 	int (*compare)(struct device *, void *);
3426 	void *data;
3427 };
3428 
3429 struct component_match {
3430 	struct component_match_entry match[4];
3431 	int nmatches;
3432 };
3433 
3434 int
3435 component_master_add_with_match(struct device *dev,
3436     const struct component_master_ops *ops, struct component_match *match)
3437 {
3438 	struct component *component;
3439 	int found = 0;
3440 	int i, ret;
3441 
3442 	SLIST_FOREACH(component, &component_list, next) {
3443 		for (i = 0; i < match->nmatches; i++) {
3444 			struct component_match_entry *m = &match->match[i];
3445 			if (m->compare(component->dev, m->data)) {
3446 				component->adev = dev;
3447 				found = 1;
3448 				break;
3449 			}
3450 		}
3451 	}
3452 
3453 	if (found) {
3454 		ret = ops->bind(dev);
3455 		if (ret)
3456 			return ret;
3457 	}
3458 
3459 	return 0;
3460 }
3461 
3462 #ifdef __HAVE_FDT
3463 
3464 #include <linux/platform_device.h>
3465 #include <dev/ofw/openfirm.h>
3466 #include <dev/ofw/fdt.h>
3467 #include <machine/fdt.h>
3468 
3469 LIST_HEAD(, platform_device) pdev_list = LIST_HEAD_INITIALIZER(pdev_list);
3470 
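/*
 * Record an fdt-attached device as a Linux-style platform_device,
 * copying the attach args (registers, node, iot, dmat) into the
 * fields Linux drivers expect.
 */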
3471 void
3472 platform_device_register(struct platform_device *pdev)
3473 {
3474 	int i;
3475 
3476 	pdev->num_resources = pdev->faa->fa_nreg;
3477 	if (pdev->faa->fa_nreg > 0) {
3478 		pdev->resource = mallocarray(pdev->faa->fa_nreg,
3479 		    sizeof(*pdev->resource), M_DEVBUF, M_WAITOK | M_ZERO);
3480 		for (i = 0; i < pdev->faa->fa_nreg; i++) {
3481 			pdev->resource[i].start = pdev->faa->fa_reg[i].addr;
3482 			pdev->resource[i].end = pdev->faa->fa_reg[i].addr +
3483 			    pdev->faa->fa_reg[i].size - 1;
3484 		}
3485 	}
3486 
3487 	pdev->parent = pdev->dev.dv_parent;
3488 	pdev->node = pdev->faa->fa_node;
3489 	pdev->iot = pdev->faa->fa_iot;
3490 	pdev->dmat = pdev->faa->fa_dmat;
3491 	LIST_INSERT_HEAD(&pdev_list, pdev, next);
3492 }
3493 
3494 
3495 struct resource *
3496 platform_get_resource(struct platform_device *pdev, u_int type, u_int num)
3497 {
3498 	KASSERT(num < pdev->num_resources);
3499 	return &pdev->resource[num];
3500 }
3501 
3502 void __iomem *
3503 devm_platform_ioremap_resource_byname(struct platform_device *pdev,
3504 				      const char *name)
3505 {
3506 	bus_space_handle_t ioh;
3507 	int err, idx;
3508 
3509 	idx = OF_getindex(pdev->node, name, "reg-names");
3510 	if (idx == -1 || idx >= pdev->num_resources)
3511 		return ERR_PTR(-EINVAL);
3512 
3513 	err = bus_space_map(pdev->iot, pdev->resource[idx].start,
3514 	    pdev->resource[idx].end - pdev->resource[idx].start + 1,
3515 	    BUS_SPACE_MAP_LINEAR, &ioh);
3516 	if (err)
3517 		return ERR_PTR(-err);
3518 
3519 	return bus_space_vaddr(pdev->iot, ioh);
3520 }
3521 
3522 #include <dev/ofw/ofw_clock.h>
3523 #include <linux/clk.h>
3524 
3525 struct clk *
3526 devm_clk_get(struct device *dev, const char *name)
3527 {
3528 	struct platform_device *pdev = (struct platform_device *)dev;
3529 	struct clk *clk;
3530 
3531 	clk = malloc(sizeof(*clk), M_DEVBUF, M_WAITOK);
3532 	clk->freq = clock_get_frequency(pdev->node, name);
3533 	return clk;
3534 }
3535 
3536 u_long
3537 clk_get_rate(struct clk *clk)
3538 {
3539 	return clk->freq;
3540 }
3541 
3542 #include <linux/gpio/consumer.h>
3543 #include <dev/ofw/ofw_gpio.h>
3544 
3545 struct gpio_desc {
3546 	uint32_t gpios[4];
3547 };
3548 
3549 struct gpio_desc *
3550 devm_gpiod_get_optional(struct device *dev, const char *name, int flags)
3551 {
3552 	struct platform_device *pdev = (struct platform_device *)dev;
3553 	struct gpio_desc *desc;
3554 	char fullname[128];
3555 	int len;
3556 
3557 	snprintf(fullname, sizeof(fullname), "%s-gpios", name);
3558 
3559 	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);
3560 	len = OF_getpropintarray(pdev->node, fullname, desc->gpios,
3561 	     sizeof(desc->gpios));
3562 	KASSERT(len <= sizeof(desc->gpios));
3563 	if (len < 0) {
3564 		free(desc, M_DEVBUF, sizeof(*desc));
3565 		return NULL;
3566 	}
3567 
3568 	switch (flags) {
3569 	case GPIOD_IN:
3570 		gpio_controller_config_pin(desc->gpios, GPIO_CONFIG_INPUT);
3571 		break;
3572 	case GPIOD_OUT_HIGH:
3573 		gpio_controller_config_pin(desc->gpios, GPIO_CONFIG_OUTPUT);
3574 		gpio_controller_set_pin(desc->gpios, 1);
3575 		break;
3576 	default:
3577 		panic("%s: unimplemented flags 0x%x", __func__, flags);
3578 	}
3579 
3580 	return desc;
3581 }
3582 
3583 int
3584 gpiod_get_value_cansleep(const struct gpio_desc *desc)
3585 {
3586 	return gpio_controller_get_pin(((struct gpio_desc *)desc)->gpios);
3587 }
3588 
3589 struct phy {
3590 	int node;
3591 	const char *name;
3592 };
3593 
3594 struct phy *
3595 devm_phy_optional_get(struct device *dev, const char *name)
3596 {
3597 	struct platform_device *pdev = (struct platform_device *)dev;
3598 	struct phy *phy;
3599 	int idx;
3600 
3601 	idx = OF_getindex(pdev->node, name, "phy-names");
3602 	if (idx == -1)
3603 		return NULL;
3604 
3605 	phy = malloc(sizeof(*phy), M_DEVBUF, M_WAITOK);
3606 	phy->node = pdev->node;
3607 	phy->name = name;
3608 
3609 	return phy;
3610 }
3611 
3612 struct bus_type platform_bus_type;
3613 
3614 #include <dev/ofw/ofw_misc.h>
3615 
3616 #include <linux/of.h>
3617 #include <linux/platform_device.h>
3618 
3619 struct device_node *
3620 __of_devnode(void *arg)
3621 {
3622 	struct device *dev = container_of(arg, struct device, of_node);
3623 	struct platform_device *pdev = (struct platform_device *)dev;
3624 
3625 	return (struct device_node *)(uintptr_t)pdev->node;
3626 }
3627 
3628 int
3629 __of_device_is_compatible(struct device_node *np, const char *compatible)
3630 {
3631 	return OF_is_compatible((uintptr_t)np, compatible);
3632 }
3633 
3634 int
3635 __of_property_present(struct device_node *np, const char *propname)
3636 {
3637 	return OF_getpropbool((uintptr_t)np, (char *)propname);
3638 }
3639 
3640 int
3641 __of_property_read_variable_u32_array(struct device_node *np,
3642     const char *propname, uint32_t *out_values, size_t sz_min, size_t sz_max)
3643 {
3644 	int len;
3645 
3646 	len = OF_getpropintarray((uintptr_t)np, (char *)propname, out_values,
3647 	    sz_max * sizeof(*out_values));
3648 	if (len < 0)
3649 		return -EINVAL;
3650 	if (len == 0)
3651 		return -ENODATA;
3652 	if (len < sz_min * sizeof(*out_values) ||
3653 	    len > sz_max * sizeof(*out_values))
3654 		return -EOVERFLOW;
3655 	if (sz_min == 1 && sz_max == 1)
3656 		return 0;
3657 	return len / sizeof(*out_values);
3658 }
3659 
3660 int
3661 __of_property_read_variable_u64_array(struct device_node *np,
3662     const char *propname, uint64_t *out_values, size_t sz_min, size_t sz_max)
3663 {
3664 	int len;
3665 
3666 	len = OF_getpropint64array((uintptr_t)np, (char *)propname, out_values,
3667 	    sz_max * sizeof(*out_values));
3668 	if (len < 0)
3669 		return -EINVAL;
3670 	if (len == 0)
3671 		return -ENODATA;
3672 	if (len < sz_min * sizeof(*out_values) ||
3673 	    len > sz_max * sizeof(*out_values))
3674 		return -EOVERFLOW;
3675 	if (sz_min == 1 && sz_max == 1)
3676 		return 0;
3677 	return len / sizeof(*out_values);
3678 }
3679 
3680 int
3681 __of_property_match_string(struct device_node *np,
3682     const char *propname, const char *str)
3683 {
3684 	int idx;
3685 
3686 	idx = OF_getindex((uintptr_t)np, str, propname);
3687 	if (idx == -1)
3688 		return -ENODATA;
3689 	return idx;
3690 }
3691 
3692 struct device_node *
3693 __of_parse_phandle(struct device_node *np, const char *propname, int idx)
3694 {
3695 	uint32_t phandles[16] = {};
3696 	int len, node;
3697 
3698 	len = OF_getpropintarray((uintptr_t)np, (char *)propname, phandles,
3699 	    sizeof(phandles));
3700 	if (len < (idx + 1) * sizeof(uint32_t))
3701 		return NULL;
3702 
3703 	node = OF_getnodebyphandle(phandles[idx]);
3704 	if (node == 0)
3705 		return NULL;
3706 
3707 	return (struct device_node *)(uintptr_t)node;
3708 }
3709 
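/*
 * Simplified phandle-with-args lookup: the property is read as a
 * flat array of cells and the arguments are taken to directly follow
 * the first phandle.
 */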
3710 int
3711 __of_parse_phandle_with_args(struct device_node *np, const char *propname,
3712     const char *cellsname, int idx, struct of_phandle_args *args)
3713 {
3714 	uint32_t phandles[16] = {};
3715 	int i, len, node;
3716 
3717 	len = OF_getpropintarray((uintptr_t)np, (char *)propname, phandles,
3718 	    sizeof(phandles));
3719 	if (len < (idx + 1) * sizeof(uint32_t))
3720 		return -ENOENT;
3721 
3722 	node = OF_getnodebyphandle(phandles[idx]);
3723 	if (node == 0)
3724 		return -ENOENT;
3725 
3726 	args->np = (struct device_node *)(uintptr_t)node;
3727 	args->args_count = OF_getpropint(node, (char *)cellsname, 0);
3728 	for (i = 0; i < args->args_count; i++)
3729 		args->args[i] = phandles[i + 1];
3730 
3731 	return 0;
3732 }
3733 
3734 int
3735 of_address_to_resource(struct device_node *np, int idx, struct resource *res)
3736 {
3737 	uint64_t reg[16] = {};
3738 	int len;
3739 
3740 	KASSERT(idx < 8);
3741 
3742 	len = OF_getpropint64array((uintptr_t)np, "reg", reg, sizeof(reg));
3743 	if (len < 0 || idx >= (len / (2 * sizeof(uint64_t))))
3744 		return -EINVAL;
3745 
3746 	res->start = reg[2 * idx];
3747 	res->end = reg[2 * idx] + reg[2 * idx + 1] - 1;
3748 
3749 	return 0;
3750 }
3751 
3752 static int
3753 next_node(int node)
3754 {
3755 	int peer = OF_peer(node);
3756 
3757 	while (node && !peer) {
3758 		node = OF_parent(node);
3759 		if (node)
3760 			peer = OF_peer(node);
3761 	}
3762 
3763 	return peer;
3764 }
3765 
3766 static int
3767 find_matching_node(int node, const struct of_device_id *id)
3768 {
3769 	int child, match;
3770 	int i;
3771 
3772 	for (child = OF_child(node); child; child = OF_peer(child)) {
3773 		match = find_matching_node(child, id);
3774 		if (match)
3775 			return match;
3776 	}
3777 
3778 	for (i = 0; id[i].compatible; i++) {
3779 		if (OF_is_compatible(node, id[i].compatible))
3780 			return node;
3781 	}
3782 
3783 	return 0;
3784 }
3785 
3786 struct device_node *
3787 __matching_node(struct device_node *np, const struct of_device_id *id)
3788 {
3789 	int node = OF_peer(0);
3790 	int match;
3791 
3792 	if (np)
3793 		node = next_node((uintptr_t)np);
3794 	while (node) {
3795 		match = find_matching_node(node, id);
3796 		if (match)
3797 			return (struct device_node *)(uintptr_t)match;
3798 		node = next_node(node);
3799 	}
3800 
3801 	return NULL;
3802 }
3803 
3804 struct platform_device *
3805 of_platform_device_create(struct device_node *np, const char *bus_id,
3806     struct device *parent)
3807 {
3808 	struct platform_device *pdev;
3809 
3810 	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
3811 	pdev->node = (intptr_t)np;
3812 	pdev->parent = parent;
3813 
3814 	LIST_INSERT_HEAD(&pdev_list, pdev, next);
3815 
3816 	return pdev;
3817 }
3818 
3819 struct platform_device *
3820 of_find_device_by_node(struct device_node *np)
3821 {
3822 	struct platform_device *pdev;
3823 
3824 	LIST_FOREACH(pdev, &pdev_list, next) {
3825 		if (pdev->node == (intptr_t)np)
3826 			return pdev;
3827 	}
3828 
3829 	return NULL;
3830 }
3831 
3832 int
3833 of_device_is_available(struct device_node *np)
3834 {
3835 	char status[32];
3836 
3837 	if (OF_getprop((uintptr_t)np, "status", status, sizeof(status)) > 0 &&
3838 	    strcmp(status, "disabled") == 0)
3839 		return 0;
3840 
3841 	return 1;
3842 }
3843 
3844 int
3845 of_dma_configure(struct device *dev, struct device_node *np, int force_dma)
3846 {
3847 	struct platform_device *pdev = (struct platform_device *)dev;
3848 	bus_dma_tag_t dmat = dma_tag_lookup(pdev->parent);
3849 
3850 	pdev->dmat = iommu_device_map(pdev->node, dmat);
3851 	return 0;
3852 }
3853 
3854 struct device_node *
3855 __of_get_compatible_child(void *p, const char *compat)
3856 {
3857 	struct device *dev = container_of(p, struct device, of_node);
3858 	struct platform_device *pdev = (struct platform_device *)dev;
3859 	int child;
3860 
3861 	for (child = OF_child(pdev->node); child; child = OF_peer(child)) {
3862 		if (OF_is_compatible(child, compat))
3863 			return (struct device_node *)(uintptr_t)child;
3864 	}
3865 	return NULL;
3866 }
3867 
3868 struct device_node *
3869 __of_get_child_by_name(void *p, const char *name)
3870 {
3871 	struct device *dev = container_of(p, struct device, of_node);
3872 	struct platform_device *pdev = (struct platform_device *)dev;
3873 	int child;
3874 
3875 	child = OF_getnodebyname(pdev->node, name);
3876 	if (child == 0)
3877 		return NULL;
3878 	return (struct device_node *)(uintptr_t)child;
3879 }
3880 
3881 int
3882 component_compare_of(struct device *dev, void *data)
3883 {
3884 	struct platform_device *pdev = (struct platform_device *)dev;
3885 
3886 	return (pdev->node == (intptr_t)data);
3887 }
3888 
3889 void
3890 drm_of_component_match_add(struct device *master,
3891 			   struct component_match **matchptr,
3892 			   int (*compare)(struct device *, void *),
3893 			   struct device_node *np)
3894 {
3895 	struct component_match *match = *matchptr;
3896 
3897 	if (match == NULL) {
3898 		match = malloc(sizeof(struct component_match),
3899 		    M_DEVBUF, M_WAITOK | M_ZERO);
3900 		*matchptr = match;
3901 	}
3902 
3903 	KASSERT(match->nmatches < nitems(match->match));
3904 	match->match[match->nmatches].compare = compare;
3905 	match->match[match->nmatches].data = np;
3906 	match->nmatches++;
3907 }
3908 
3909 #endif
3910