xref: /openbsd/sys/dev/pci/drm/drm_linux.c (revision a09e9584)
1 /*	$OpenBSD: drm_linux.c,v 1.113 2024/06/03 12:48:25 claudio Exp $	*/
2 /*
3  * Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
4  * Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <sys/systm.h>
21 #include <sys/param.h>
22 #include <sys/event.h>
23 #include <sys/filedesc.h>
24 #include <sys/kthread.h>
25 #include <sys/stat.h>
26 #include <sys/unistd.h>
27 #include <sys/proc.h>
28 #include <sys/pool.h>
29 #include <sys/fcntl.h>
30 
31 #include <dev/pci/ppbreg.h>
32 
33 #include <linux/dma-buf.h>
34 #include <linux/mod_devicetable.h>
35 #include <linux/acpi.h>
36 #include <linux/pagevec.h>
37 #include <linux/dma-fence-array.h>
38 #include <linux/dma-fence-chain.h>
39 #include <linux/interrupt.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/scatterlist.h>
43 #include <linux/i2c.h>
44 #include <linux/pci.h>
45 #include <linux/notifier.h>
46 #include <linux/backlight.h>
47 #include <linux/shrinker.h>
48 #include <linux/fb.h>
49 #include <linux/xarray.h>
50 #include <linux/interval_tree.h>
51 #include <linux/kthread.h>
52 #include <linux/processor.h>
53 #include <linux/sync_file.h>
54 
55 #include <drm/drm_device.h>
56 #include <drm/drm_connector.h>
57 #include <drm/drm_print.h>
58 
59 #if defined(__amd64__) || defined(__i386__)
60 #include "bios.h"
61 #endif
62 
63 /* allowed to sleep */
64 void
65 tasklet_unlock_wait(struct tasklet_struct *ts)
66 {
67 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
68 		cpu_relax();
69 }
70 
71 /* must not sleep */
72 void
73 tasklet_unlock_spin_wait(struct tasklet_struct *ts)
74 {
75 	while (test_bit(TASKLET_STATE_RUN, &ts->state))
76 		cpu_relax();
77 }
78 
79 void
80 tasklet_run(void *arg)
81 {
82 	struct tasklet_struct *ts = arg;
83 
84 	clear_bit(TASKLET_STATE_SCHED, &ts->state);
85 	if (tasklet_trylock(ts)) {
86 		if (!atomic_read(&ts->count)) {
87 			if (ts->use_callback)
88 				ts->callback(ts);
89 			else
90 				ts->func(ts->data);
91 		}
92 		tasklet_unlock(ts);
93 	}
94 }
95 
96 /* 32-bit powerpc lacks 64-bit atomics */
97 #if defined(__powerpc__) && !defined(__powerpc64__)
98 struct mutex atomic64_mtx = MUTEX_INITIALIZER(IPL_HIGH);
99 #endif
100 
101 void
102 set_current_state(int state)
103 {
104 	int prio = state;
105 
106 	KASSERT(state != TASK_RUNNING);
107 	/* check if already on the sleep list */
108 	if (curproc->p_wchan != NULL)
109 		return;
110 	sleep_setup(curproc, prio, "schto");
111 }
112 
113 void
114 __set_current_state(int state)
115 {
116 	struct proc *p = curproc;
117 
118 	KASSERT(state == TASK_RUNNING);
119 	SCHED_LOCK();
120 	unsleep(p);
121 	p->p_stat = SONPROC;
122 	atomic_clearbits_int(&p->p_flag, P_WSLEEP);
123 	SCHED_UNLOCK();
124 }
125 
126 void
127 schedule(void)
128 {
129 	schedule_timeout(MAX_SCHEDULE_TIMEOUT);
130 }
131 
132 long
133 schedule_timeout(long timeout)
134 {
135 	unsigned long deadline;
136 	int timo = 0;
137 
138 	KASSERT(!cold);
139 
140 	if (timeout != MAX_SCHEDULE_TIMEOUT)
141 		timo = timeout;
142 	if (timeout != MAX_SCHEDULE_TIMEOUT)
143 		deadline = jiffies + timeout;
144 	sleep_finish(timo, timeout > 0);
145 	if (timeout != MAX_SCHEDULE_TIMEOUT)
146 		timeout = deadline - jiffies;
147 
148 	return timeout > 0 ? timeout : 0;
149 }
150 
151 long
152 schedule_timeout_uninterruptible(long timeout)
153 {
154 	tsleep(curproc, PWAIT, "schtou", timeout);
155 	return 0;
156 }
157 
158 int
159 wake_up_process(struct proc *p)
160 {
161 	int rv;
162 
163 	SCHED_LOCK();
164 	rv = wakeup_proc(p, 0);
165 	SCHED_UNLOCK();
166 	return rv;
167 }
168 
169 int
170 autoremove_wake_function(struct wait_queue_entry *wqe, unsigned int mode,
171     int sync, void *key)
172 {
173 	if (wqe->private)
174 		wake_up_process(wqe->private);
175 	list_del_init(&wqe->entry);
176 	return 0;
177 }
178 
179 void
180 prepare_to_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe, int state)
181 {
182 	mtx_enter(&wqh->lock);
183 	if (list_empty(&wqe->entry))
184 		__add_wait_queue(wqh, wqe);
185 	mtx_leave(&wqh->lock);
186 
187 	set_current_state(state);
188 }
189 
190 void
191 finish_wait(wait_queue_head_t *wqh, wait_queue_entry_t *wqe)
192 {
193 	__set_current_state(TASK_RUNNING);
194 
195 	mtx_enter(&wqh->lock);
196 	if (!list_empty(&wqe->entry))
197 		list_del_init(&wqe->entry);
198 	mtx_leave(&wqh->lock);
199 }
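/*
 * Usage sketch (illustrative, not code from this file): the two helpers
 * above are intended to be used in the usual Linux wait-loop shape.
 * "wqh", "wqe" and "cond" are placeholders for a caller's wait queue
 * head, an initialized wait queue entry and its wakeup condition.
 *
 *	for (;;) {
 *		prepare_to_wait(&wqh, &wqe, TASK_UNINTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wqh, &wqe);
 */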
200 
201 void
202 flush_workqueue(struct workqueue_struct *wq)
203 {
204 	if (cold)
205 		return;
206 
207 	if (wq)
208 		taskq_barrier((struct taskq *)wq);
209 }
210 
211 bool
212 flush_work(struct work_struct *work)
213 {
214 	if (cold)
215 		return false;
216 
217 	if (work->tq)
218 		taskq_barrier(work->tq);
219 	return false;
220 }
221 
222 bool
223 flush_delayed_work(struct delayed_work *dwork)
224 {
225 	bool ret = false;
226 
227 	if (cold)
228 		return false;
229 
230 	while (timeout_pending(&dwork->to)) {
231 		tsleep(dwork, PWAIT, "fldwto", 1);
232 		ret = true;
233 	}
234 
235 	if (dwork->tq)
236 		taskq_barrier(dwork->tq);
237 	return ret;
238 }
239 
240 struct kthread {
241 	int (*func)(void *);
242 	void *data;
243 	struct proc *proc;
244 	volatile u_int flags;
245 #define KTHREAD_SHOULDSTOP	0x0000001
246 #define KTHREAD_STOPPED		0x0000002
247 #define KTHREAD_SHOULDPARK	0x0000004
248 #define KTHREAD_PARKED		0x0000008
249 	LIST_ENTRY(kthread) next;
250 };
251 
252 LIST_HEAD(, kthread) kthread_list = LIST_HEAD_INITIALIZER(kthread_list);
253 
254 void
255 kthread_func(void *arg)
256 {
257 	struct kthread *thread = arg;
258 	int ret;
259 
260 	ret = thread->func(thread->data);
261 	thread->flags |= KTHREAD_STOPPED;
262 	wakeup(thread);
263 	kthread_exit(ret);
264 }
265 
266 struct proc *
267 kthread_run(int (*func)(void *), void *data, const char *name)
268 {
269 	struct kthread *thread;
270 
271 	thread = malloc(sizeof(*thread), M_DRM, M_WAITOK);
272 	thread->func = func;
273 	thread->data = data;
274 	thread->flags = 0;
275 
276 	if (kthread_create(kthread_func, thread, &thread->proc, name)) {
277 		free(thread, M_DRM, sizeof(*thread));
278 		return ERR_PTR(-ENOMEM);
279 	}
280 
281 	LIST_INSERT_HEAD(&kthread_list, thread, next);
282 	return thread->proc;
283 }
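/*
 * Usage sketch (illustrative only): a thread started through
 * kthread_run() typically loops until kthread_stop() is called on the
 * returned proc.  "my_thread_fn", "do_work" and "sc" are hypothetical.
 *
 *	int
 *	my_thread_fn(void *arg)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		while (!kthread_should_stop())
 *			do_work(sc);
 *		return 0;
 *	}
 *
 *	struct proc *p = kthread_run(my_thread_fn, sc, "mythread");
 *	...
 *	kthread_stop(p);
 */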
284 
285 struct kthread_worker *
286 kthread_create_worker(unsigned int flags, const char *fmt, ...)
287 {
288 	char name[MAXCOMLEN+1];
289 	va_list ap;
290 
291 	struct kthread_worker *w = malloc(sizeof(*w), M_DRM, M_WAITOK);
292 	va_start(ap, fmt);
293 	vsnprintf(name, sizeof(name), fmt, ap);
294 	va_end(ap);
295 	w->tq = taskq_create(name, 1, IPL_HIGH, 0);
296 
297 	return w;
298 }
299 
300 void
301 kthread_destroy_worker(struct kthread_worker *worker)
302 {
303 	taskq_destroy(worker->tq);
304 	free(worker, M_DRM, sizeof(*worker));
305 
306 }
307 
308 void
309 kthread_init_work(struct kthread_work *work, void (*func)(struct kthread_work *))
310 {
311 	work->tq = NULL;
312 	task_set(&work->task, (void (*)(void *))func, work);
313 }
314 
315 bool
316 kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
317 {
318 	work->tq = worker->tq;
319 	return task_add(work->tq, &work->task);
320 }
321 
322 bool
323 kthread_cancel_work_sync(struct kthread_work *work)
324 {
325 	return task_del(work->tq, &work->task);
326 }
327 
328 void
329 kthread_flush_work(struct kthread_work *work)
330 {
331 	if (cold)
332 		return;
333 
334 	if (work->tq)
335 		taskq_barrier(work->tq);
336 }
337 
338 void
339 kthread_flush_worker(struct kthread_worker *worker)
340 {
341 	if (cold)
342 		return;
343 
344 	if (worker->tq)
345 		taskq_barrier(worker->tq);
346 }
347 
348 struct kthread *
349 kthread_lookup(struct proc *p)
350 {
351 	struct kthread *thread;
352 
353 	LIST_FOREACH(thread, &kthread_list, next) {
354 		if (thread->proc == p)
355 			break;
356 	}
357 	KASSERT(thread);
358 
359 	return thread;
360 }
361 
362 int
363 kthread_should_park(void)
364 {
365 	struct kthread *thread = kthread_lookup(curproc);
366 	return (thread->flags & KTHREAD_SHOULDPARK);
367 }
368 
369 void
370 kthread_parkme(void)
371 {
372 	struct kthread *thread = kthread_lookup(curproc);
373 
374 	while (thread->flags & KTHREAD_SHOULDPARK) {
375 		thread->flags |= KTHREAD_PARKED;
376 		wakeup(thread);
377 		tsleep_nsec(thread, PPAUSE, "parkme", INFSLP);
378 		thread->flags &= ~KTHREAD_PARKED;
379 	}
380 }
381 
382 void
383 kthread_park(struct proc *p)
384 {
385 	struct kthread *thread = kthread_lookup(p);
386 
387 	while ((thread->flags & KTHREAD_PARKED) == 0) {
388 		thread->flags |= KTHREAD_SHOULDPARK;
389 		wake_up_process(thread->proc);
390 		tsleep_nsec(thread, PPAUSE, "park", INFSLP);
391 	}
392 }
393 
394 void
395 kthread_unpark(struct proc *p)
396 {
397 	struct kthread *thread = kthread_lookup(p);
398 
399 	thread->flags &= ~KTHREAD_SHOULDPARK;
400 	wakeup(thread);
401 }
402 
403 int
404 kthread_should_stop(void)
405 {
406 	struct kthread *thread = kthread_lookup(curproc);
407 	return (thread->flags & KTHREAD_SHOULDSTOP);
408 }
409 
410 void
411 kthread_stop(struct proc *p)
412 {
413 	struct kthread *thread = kthread_lookup(p);
414 
415 	while ((thread->flags & KTHREAD_STOPPED) == 0) {
416 		thread->flags |= KTHREAD_SHOULDSTOP;
417 		kthread_unpark(p);
418 		wake_up_process(thread->proc);
419 		tsleep_nsec(thread, PPAUSE, "stop", INFSLP);
420 	}
421 	LIST_REMOVE(thread, next);
422 	free(thread, M_DRM, sizeof(*thread));
423 }
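/*
 * Parking sketch (illustrative): kthread_park() is cooperative; the
 * thread itself must poll kthread_should_park() and acknowledge with
 * kthread_parkme().  The worker loop below is a hypothetical example,
 * not code from this driver.
 *
 *	while (!kthread_should_stop()) {
 *		if (kthread_should_park())
 *			kthread_parkme();
 *		do_work();
 *	}
 */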
424 
425 #if NBIOS > 0
426 extern char smbios_board_vendor[];
427 extern char smbios_board_prod[];
428 extern char smbios_board_serial[];
429 #endif
430 
431 bool
432 dmi_match(int slot, const char *str)
433 {
434 	switch (slot) {
435 	case DMI_SYS_VENDOR:
436 		if (hw_vendor != NULL &&
437 		    !strcmp(hw_vendor, str))
438 			return true;
439 		break;
440 	case DMI_PRODUCT_NAME:
441 		if (hw_prod != NULL &&
442 		    !strcmp(hw_prod, str))
443 			return true;
444 		break;
445 	case DMI_PRODUCT_VERSION:
446 		if (hw_ver != NULL &&
447 		    !strcmp(hw_ver, str))
448 			return true;
449 		break;
450 #if NBIOS > 0
451 	case DMI_BOARD_VENDOR:
452 		if (strcmp(smbios_board_vendor, str) == 0)
453 			return true;
454 		break;
455 	case DMI_BOARD_NAME:
456 		if (strcmp(smbios_board_prod, str) == 0)
457 			return true;
458 		break;
459 	case DMI_BOARD_SERIAL:
460 		if (strcmp(smbios_board_serial, str) == 0)
461 			return true;
462 		break;
463 #else
464 	case DMI_BOARD_VENDOR:
465 		if (hw_vendor != NULL &&
466 		    !strcmp(hw_vendor, str))
467 			return true;
468 		break;
469 	case DMI_BOARD_NAME:
470 		if (hw_prod != NULL &&
471 		    !strcmp(hw_prod, str))
472 			return true;
473 		break;
474 #endif
475 	case DMI_NONE:
476 	default:
477 		return false;
478 	}
479 
480 	return false;
481 }
482 
483 static bool
484 dmi_found(const struct dmi_system_id *dsi)
485 {
486 	int i, slot;
487 
488 	for (i = 0; i < nitems(dsi->matches); i++) {
489 		slot = dsi->matches[i].slot;
490 		if (slot == DMI_NONE)
491 			break;
492 		if (!dmi_match(slot, dsi->matches[i].substr))
493 			return false;
494 	}
495 
496 	return true;
497 }
498 
499 const struct dmi_system_id *
500 dmi_first_match(const struct dmi_system_id *sysid)
501 {
502 	const struct dmi_system_id *dsi;
503 
504 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
505 		if (dmi_found(dsi))
506 			return dsi;
507 	}
508 
509 	return NULL;
510 }
511 
512 #if NBIOS > 0
513 extern char smbios_bios_date[];
514 extern char smbios_bios_version[];
515 #endif
516 
517 const char *
518 dmi_get_system_info(int slot)
519 {
520 #if NBIOS > 0
521 	switch (slot) {
522 	case DMI_BIOS_DATE:
523 		return smbios_bios_date;
524 	case DMI_BIOS_VERSION:
525 		return smbios_bios_version;
526 	default:
527 		printf("%s slot %d not handled\n", __func__, slot);
528 	}
529 #endif
530 	return NULL;
531 }
532 
533 int
534 dmi_check_system(const struct dmi_system_id *sysid)
535 {
536 	const struct dmi_system_id *dsi;
537 	int num = 0;
538 
539 	for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
540 		if (dmi_found(dsi)) {
541 			num++;
542 			if (dsi->callback && dsi->callback(dsi))
543 				break;
544 		}
545 	}
546 	return (num);
547 }
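/*
 * Usage sketch (illustrative): callers pass a table terminated by an
 * all-zero entry; dmi_found() stops at the first DMI_NONE slot, so
 * every listed match must hit for an entry to apply.  The table below
 * is a made-up example, not a real quirk.
 *
 *	static const struct dmi_system_id example_quirks[] = {
 *		{
 *			.callback = example_callback,
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Example Corp"),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Example 1000"),
 *			},
 *		},
 *		{}
 *	};
 *
 *	dmi_check_system(example_quirks);
 */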
548 
549 struct vm_page *
550 alloc_pages(unsigned int gfp_mask, unsigned int order)
551 {
552 	int flags = (gfp_mask & M_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
553 	struct uvm_constraint_range *constraint = &no_constraint;
554 	struct pglist mlist;
555 
556 	if (gfp_mask & M_CANFAIL)
557 		flags |= UVM_PLA_FAILOK;
558 	if (gfp_mask & M_ZERO)
559 		flags |= UVM_PLA_ZERO;
560 	if (gfp_mask & __GFP_DMA32)
561 		constraint = &dma_constraint;
562 
563 	TAILQ_INIT(&mlist);
564 	if (uvm_pglistalloc(PAGE_SIZE << order, constraint->ucr_low,
565 	    constraint->ucr_high, PAGE_SIZE, 0, &mlist, 1, flags))
566 		return NULL;
567 	return TAILQ_FIRST(&mlist);
568 }
569 
570 void
571 __free_pages(struct vm_page *page, unsigned int order)
572 {
573 	struct pglist mlist;
574 	int i;
575 
576 	TAILQ_INIT(&mlist);
577 	for (i = 0; i < (1 << order); i++)
578 		TAILQ_INSERT_TAIL(&mlist, &page[i], pageq);
579 	uvm_pglistfree(&mlist);
580 }
581 
582 void
583 __pagevec_release(struct pagevec *pvec)
584 {
585 	struct pglist mlist;
586 	int i;
587 
588 	TAILQ_INIT(&mlist);
589 	for (i = 0; i < pvec->nr; i++)
590 		TAILQ_INSERT_TAIL(&mlist, pvec->pages[i], pageq);
591 	uvm_pglistfree(&mlist);
592 	pagevec_reinit(pvec);
593 }
594 
595 static struct kmem_va_mode kv_physwait = {
596 	.kv_map = &phys_map,
597 	.kv_wait = 1,
598 };
599 
600 void *
601 kmap(struct vm_page *pg)
602 {
603 	vaddr_t va;
604 
605 #if defined (__HAVE_PMAP_DIRECT)
606 	va = pmap_map_direct(pg);
607 #else
608 	va = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
609 	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
610 	pmap_update(pmap_kernel());
611 #endif
612 	return (void *)va;
613 }
614 
615 void
616 kunmap_va(void *addr)
617 {
618 	vaddr_t va = (vaddr_t)addr;
619 
620 #if defined (__HAVE_PMAP_DIRECT)
621 	pmap_unmap_direct(va);
622 #else
623 	pmap_kremove(va, PAGE_SIZE);
624 	pmap_update(pmap_kernel());
625 	km_free((void *)va, PAGE_SIZE, &kv_physwait, &kp_none);
626 #endif
627 }
628 
629 vaddr_t kmap_atomic_va;
630 int kmap_atomic_inuse;
631 
632 void *
633 kmap_atomic_prot(struct vm_page *pg, pgprot_t prot)
634 {
635 	KASSERT(!kmap_atomic_inuse);
636 
637 	kmap_atomic_inuse = 1;
638 	pmap_kenter_pa(kmap_atomic_va, VM_PAGE_TO_PHYS(pg) | prot,
639 	    PROT_READ | PROT_WRITE);
640 	return (void *)kmap_atomic_va;
641 }
642 
643 void
644 kunmap_atomic(void *addr)
645 {
646 	KASSERT(kmap_atomic_inuse);
647 
648 	pmap_kremove(kmap_atomic_va, PAGE_SIZE);
649 	kmap_atomic_inuse = 0;
650 }
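/*
 * Note/sketch (illustrative): only one atomic mapping can be live at a
 * time here (a single global VA guarded by kmap_atomic_inuse), so the
 * expected pattern is a short copy between map and unmap; "pg", "src"
 * and "len" are placeholders.
 *
 *	void *dst = kmap_atomic_prot(pg, 0);
 *	memcpy(dst, src, len);
 *	kunmap_atomic(dst);
 */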
651 
652 void *
653 vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
654      pgprot_t prot)
655 {
656 	vaddr_t va;
657 	paddr_t pa;
658 	int i;
659 
660 	va = (vaddr_t)km_alloc(PAGE_SIZE * npages, &kv_any, &kp_none,
661 	    &kd_nowait);
662 	if (va == 0)
663 		return NULL;
664 	for (i = 0; i < npages; i++) {
665 		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
666 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
667 		    PROT_READ | PROT_WRITE,
668 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
669 		pmap_update(pmap_kernel());
670 	}
671 
672 	return (void *)va;
673 }
674 
675 void *
676 vmap_pfn(unsigned long *pfns, unsigned int npfn, pgprot_t prot)
677 {
678 	vaddr_t va;
679 	paddr_t pa;
680 	int i;
681 
682 	va = (vaddr_t)km_alloc(PAGE_SIZE * npfn, &kv_any, &kp_none,
683 	    &kd_nowait);
684 	if (va == 0)
685 		return NULL;
686 	for (i = 0; i < npfn; i++) {
687 		pa = round_page(pfns[i]) | prot;
688 		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
689 		    PROT_READ | PROT_WRITE,
690 		    PROT_READ | PROT_WRITE | PMAP_WIRED);
691 		pmap_update(pmap_kernel());
692 	}
693 
694 	return (void *)va;
695 }
696 
697 void
698 vunmap(void *addr, size_t size)
699 {
700 	vaddr_t va = (vaddr_t)addr;
701 
702 	pmap_remove(pmap_kernel(), va, va + size);
703 	pmap_update(pmap_kernel());
704 	km_free((void *)va, size, &kv_any, &kp_none);
705 }
706 
707 bool
708 is_vmalloc_addr(const void *p)
709 {
710 	vaddr_t min, max, addr;
711 
712 	min = vm_map_min(kernel_map);
713 	max = vm_map_max(kernel_map);
714 	addr = (vaddr_t)p;
715 
716 	if (addr >= min && addr <= max)
717 		return true;
718 	else
719 		return false;
720 }
721 
722 void
723 print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
724     int rowsize, int groupsize, const void *buf, size_t len, bool ascii)
725 {
726 	const uint8_t *cbuf = buf;
727 	int i;
728 
729 	for (i = 0; i < len; i++) {
730 		if ((i % rowsize) == 0)
731 			printf("%s", prefix_str);
732 		printf("%02x", cbuf[i]);
733 		if ((i % rowsize) == (rowsize - 1))
734 			printf("\n");
735 		else
736 			printf(" ");
737 	}
738 }
739 
740 void *
741 memchr_inv(const void *s, int c, size_t n)
742 {
743 	if (n != 0) {
744 		const unsigned char *p = s;
745 
746 		do {
747 			if (*p++ != (unsigned char)c)
748 				return ((void *)(p - 1));
749 		} while (--n != 0);
750 	}
751 	return (NULL);
752 }
753 
754 int
755 panic_cmp(struct rb_node *a, struct rb_node *b)
756 {
757 	panic(__func__);
758 }
759 
760 #undef RB_ROOT
761 #define RB_ROOT(head)	(head)->rbh_root
762 
763 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
764 
765 /*
766  * This is a fairly minimal implementation of the Linux "idr" API.  It
767  * probably isn't very efficient, and definitely isn't RCU safe.  The
768  * pre-load buffer is global instead of per-cpu; we rely on the kernel
769  * lock to make this work.  We do randomize our IDs in order to make
770  * them harder to guess.
771  */
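/*
 * Usage sketch (illustrative, not taken from a caller in this tree):
 * the typical alloc/lookup/remove cycle under the kernel lock looks
 * like this; "idr" and "obj" are placeholders for a caller's idr and
 * the pointer being registered.
 *
 *	int handle;
 *
 *	idr_preload(GFP_KERNEL);
 *	handle = idr_alloc(&idr, obj, 1, 0, GFP_KERNEL);
 *	idr_preload_end();
 *	if (handle < 0)
 *		return handle;
 *	...
 *	KASSERT(idr_find(&idr, handle) == obj);
 *	idr_remove(&idr, handle);
 */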
772 
773 int idr_cmp(struct idr_entry *, struct idr_entry *);
774 SPLAY_PROTOTYPE(idr_tree, idr_entry, entry, idr_cmp);
775 
776 struct pool idr_pool;
777 struct idr_entry *idr_entry_cache;
778 
779 void
780 idr_init(struct idr *idr)
781 {
782 	SPLAY_INIT(&idr->tree);
783 }
784 
785 void
786 idr_destroy(struct idr *idr)
787 {
788 	struct idr_entry *id;
789 
790 	while ((id = SPLAY_MIN(idr_tree, &idr->tree))) {
791 		SPLAY_REMOVE(idr_tree, &idr->tree, id);
792 		pool_put(&idr_pool, id);
793 	}
794 }
795 
796 void
797 idr_preload(unsigned int gfp_mask)
798 {
799 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
800 
801 	KERNEL_ASSERT_LOCKED();
802 
803 	if (idr_entry_cache == NULL)
804 		idr_entry_cache = pool_get(&idr_pool, flags);
805 }
806 
807 int
808 idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
809 {
810 	int flags = (gfp_mask & GFP_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
811 	struct idr_entry *id;
812 	int begin;
813 
814 	KERNEL_ASSERT_LOCKED();
815 
816 	if (idr_entry_cache) {
817 		id = idr_entry_cache;
818 		idr_entry_cache = NULL;
819 	} else {
820 		id = pool_get(&idr_pool, flags);
821 		if (id == NULL)
822 			return -ENOMEM;
823 	}
824 
825 	if (end <= 0)
826 		end = INT_MAX;
827 
828 #ifdef notyet
829 	id->id = begin = start + arc4random_uniform(end - start);
830 #else
831 	id->id = begin = start;
832 #endif
833 	while (SPLAY_INSERT(idr_tree, &idr->tree, id)) {
834 		if (id->id == end)
835 			id->id = start;
836 		else
837 			id->id++;
838 		if (id->id == begin) {
839 			pool_put(&idr_pool, id);
840 			return -ENOSPC;
841 		}
842 	}
843 	id->ptr = ptr;
844 	return id->id;
845 }
846 
847 void *
848 idr_replace(struct idr *idr, void *ptr, unsigned long id)
849 {
850 	struct idr_entry find, *res;
851 	void *old;
852 
853 	find.id = id;
854 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
855 	if (res == NULL)
856 		return ERR_PTR(-ENOENT);
857 	old = res->ptr;
858 	res->ptr = ptr;
859 	return old;
860 }
861 
862 void *
863 idr_remove(struct idr *idr, unsigned long id)
864 {
865 	struct idr_entry find, *res;
866 	void *ptr = NULL;
867 
868 	find.id = id;
869 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
870 	if (res) {
871 		SPLAY_REMOVE(idr_tree, &idr->tree, res);
872 		ptr = res->ptr;
873 		pool_put(&idr_pool, res);
874 	}
875 	return ptr;
876 }
877 
878 void *
879 idr_find(struct idr *idr, unsigned long id)
880 {
881 	struct idr_entry find, *res;
882 
883 	find.id = id;
884 	res = SPLAY_FIND(idr_tree, &idr->tree, &find);
885 	if (res == NULL)
886 		return NULL;
887 	return res->ptr;
888 }
889 
890 void *
891 idr_get_next(struct idr *idr, int *id)
892 {
893 	struct idr_entry *res;
894 
895 	SPLAY_FOREACH(res, idr_tree, &idr->tree) {
896 		if (res->id >= *id) {
897 			*id = res->id;
898 			return res->ptr;
899 		}
900 	}
901 
902 	return NULL;
903 }
904 
905 int
906 idr_for_each(struct idr *idr, int (*func)(int, void *, void *), void *data)
907 {
908 	struct idr_entry *id;
909 	int ret;
910 
911 	SPLAY_FOREACH(id, idr_tree, &idr->tree) {
912 		ret = func(id->id, id->ptr, data);
913 		if (ret)
914 			return ret;
915 	}
916 
917 	return 0;
918 }
919 
920 int
921 idr_cmp(struct idr_entry *a, struct idr_entry *b)
922 {
923 	return (a->id < b->id ? -1 : a->id > b->id);
924 }
925 
926 SPLAY_GENERATE(idr_tree, idr_entry, entry, idr_cmp);
927 
928 void
929 ida_init(struct ida *ida)
930 {
931 	idr_init(&ida->idr);
932 }
933 
934 void
935 ida_destroy(struct ida *ida)
936 {
937 	idr_destroy(&ida->idr);
938 }
939 
940 int
941 ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
942     gfp_t gfp_mask)
943 {
944 	return idr_alloc(&ida->idr, NULL, start, end, gfp_mask);
945 }
946 
947 void
948 ida_simple_remove(struct ida *ida, unsigned int id)
949 {
950 	idr_remove(&ida->idr, id);
951 }
952 
953 int
954 ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
955 {
956 	return idr_alloc(&ida->idr, NULL, min, INT_MAX, gfp);
957 }
958 
959 int
960 ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
961 {
962 	return idr_alloc(&ida->idr, NULL, 0, max - 1, gfp);
963 }
964 
965 void
966 ida_free(struct ida *ida, unsigned int id)
967 {
968 	idr_remove(&ida->idr, id);
969 }
970 
971 int
972 xarray_cmp(struct xarray_entry *a, struct xarray_entry *b)
973 {
974 	return (a->id < b->id ? -1 : a->id > b->id);
975 }
976 
977 SPLAY_PROTOTYPE(xarray_tree, xarray_entry, entry, xarray_cmp);
978 struct pool xa_pool;
979 SPLAY_GENERATE(xarray_tree, xarray_entry, entry, xarray_cmp);
980 
981 void
982 xa_init_flags(struct xarray *xa, gfp_t flags)
983 {
984 	static int initialized;
985 
986 	if (!initialized) {
987 		pool_init(&xa_pool, sizeof(struct xarray_entry), 0, IPL_NONE, 0,
988 		    "xapl", NULL);
989 		initialized = 1;
990 	}
991 	SPLAY_INIT(&xa->xa_tree);
992 	if (flags & XA_FLAGS_LOCK_IRQ)
993 		mtx_init(&xa->xa_lock, IPL_TTY);
994 	else
995 		mtx_init(&xa->xa_lock, IPL_NONE);
996 }
997 
998 void
999 xa_destroy(struct xarray *xa)
1000 {
1001 	struct xarray_entry *id;
1002 
1003 	while ((id = SPLAY_MIN(xarray_tree, &xa->xa_tree))) {
1004 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, id);
1005 		pool_put(&xa_pool, id);
1006 	}
1007 }
1008 
1009 /* Don't wrap ids. */
1010 int
1011 __xa_alloc(struct xarray *xa, u32 *id, void *entry, int limit, gfp_t gfp)
1012 {
1013 	struct xarray_entry *xid;
1014 	int start = (xa->xa_flags & XA_FLAGS_ALLOC1) ? 1 : 0;
1015 	int begin;
1016 
1017 	if (gfp & GFP_NOWAIT) {
1018 		xid = pool_get(&xa_pool, PR_NOWAIT);
1019 	} else {
1020 		mtx_leave(&xa->xa_lock);
1021 		xid = pool_get(&xa_pool, PR_WAITOK);
1022 		mtx_enter(&xa->xa_lock);
1023 	}
1024 
1025 	if (xid == NULL)
1026 		return -ENOMEM;
1027 
1028 	if (limit <= 0)
1029 		limit = INT_MAX;
1030 
1031 	xid->id = begin = start;
1032 
1033 	while (SPLAY_INSERT(xarray_tree, &xa->xa_tree, xid)) {
1034 		if (xid->id == limit)
1035 			xid->id = start;
1036 		else
1037 			xid->id++;
1038 		if (xid->id == begin) {
1039 			pool_put(&xa_pool, xid);
1040 			return -EBUSY;
1041 		}
1042 	}
1043 	xid->ptr = entry;
1044 	*id = xid->id;
1045 	return 0;
1046 }
1047 
1048 /*
1049  * Wrap ids and store the next id.
1050  * We walk the entire tree, so wrapping needs no special casing.
1051  * The only caller of this (i915_drm_client.c) doesn't use the next id.
1052  */
1053 int
1054 __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, int limit, u32 *next,
1055     gfp_t gfp)
1056 {
1057 	int r = __xa_alloc(xa, id, entry, limit, gfp);
1058 	*next = *id + 1;
1059 	return r;
1060 }
1061 
1062 void *
1063 __xa_erase(struct xarray *xa, unsigned long index)
1064 {
1065 	struct xarray_entry find, *res;
1066 	void *ptr = NULL;
1067 
1068 	find.id = index;
1069 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1070 	if (res) {
1071 		SPLAY_REMOVE(xarray_tree, &xa->xa_tree, res);
1072 		ptr = res->ptr;
1073 		pool_put(&xa_pool, res);
1074 	}
1075 	return ptr;
1076 }
1077 
1078 void *
1079 __xa_load(struct xarray *xa, unsigned long index)
1080 {
1081 	struct xarray_entry find, *res;
1082 
1083 	find.id = index;
1084 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1085 	if (res == NULL)
1086 		return NULL;
1087 	return res->ptr;
1088 }
1089 
1090 void *
1091 __xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1092 {
1093 	struct xarray_entry find, *res;
1094 	void *prev;
1095 
1096 	if (entry == NULL)
1097 		return __xa_erase(xa, index);
1098 
1099 	find.id = index;
1100 	res = SPLAY_FIND(xarray_tree, &xa->xa_tree, &find);
1101 	if (res != NULL) {
1102 		/* index exists */
1103 		/* XXX multislot entry updates not implemented yet */
1104 		prev = res->ptr;
1105 		res->ptr = entry;
1106 		return prev;
1107 	}
1108 
1109 	/* index not found, add new */
1110 	if (gfp & GFP_NOWAIT) {
1111 		res = pool_get(&xa_pool, PR_NOWAIT);
1112 	} else {
1113 		mtx_leave(&xa->xa_lock);
1114 		res = pool_get(&xa_pool, PR_WAITOK);
1115 		mtx_enter(&xa->xa_lock);
1116 	}
1117 	if (res == NULL)
1118 		return XA_ERROR(-ENOMEM);
1119 	res->id = index;
1120 	res->ptr = entry;
1121 	if (SPLAY_INSERT(xarray_tree, &xa->xa_tree, res) != NULL)
1122 		return XA_ERROR(-EINVAL);
1123 	return NULL; /* no prev entry at index */
1124 }
1125 
1126 void *
1127 xa_get_next(struct xarray *xa, unsigned long *index)
1128 {
1129 	struct xarray_entry *res;
1130 
1131 	SPLAY_FOREACH(res, xarray_tree, &xa->xa_tree) {
1132 		if (res->id >= *index) {
1133 			*index = res->id;
1134 			return res->ptr;
1135 		}
1136 	}
1137 
1138 	return NULL;
1139 }
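/*
 * Usage sketch (illustrative): the __xa_* variants above expect the
 * caller to hold xa_lock, and __xa_alloc()/__xa_store() temporarily
 * drop it when a waiting pool_get() is needed.  A hypothetical locked
 * allocation, with "xa" and "obj" as placeholders:
 *
 *	u32 id;
 *	int error;
 *
 *	mtx_enter(&xa->xa_lock);
 *	error = __xa_alloc(xa, &id, obj, 0, GFP_KERNEL);
 *	mtx_leave(&xa->xa_lock);
 *	if (error)
 *		return error;
 */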
1140 
1141 int
1142 sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
1143 {
1144 	table->sgl = mallocarray(nents, sizeof(struct scatterlist),
1145 	    M_DRM, gfp_mask | M_ZERO);
1146 	if (table->sgl == NULL)
1147 		return -ENOMEM;
1148 	table->nents = table->orig_nents = nents;
1149 	sg_mark_end(&table->sgl[nents - 1]);
1150 	return 0;
1151 }
1152 
1153 void
1154 sg_free_table(struct sg_table *table)
1155 {
1156 	free(table->sgl, M_DRM,
1157 	    table->orig_nents * sizeof(struct scatterlist));
1158 	table->orig_nents = 0;
1159 	table->sgl = NULL;
1160 }
1161 
1162 size_t
1163 sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
1164     const void *buf, size_t buflen)
1165 {
1166 	panic("%s", __func__);
1167 }
1168 
1169 int
1170 i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1171 {
1172 	void *cmd = NULL;
1173 	int cmdlen = 0;
1174 	int err, ret = 0;
1175 	int op;
1176 
1177 	iic_acquire_bus(&adap->ic, 0);
1178 
1179 	while (num > 2) {
1180 		op = (msgs->flags & I2C_M_RD) ? I2C_OP_READ : I2C_OP_WRITE;
1181 		err = iic_exec(&adap->ic, op, msgs->addr, NULL, 0,
1182 		    msgs->buf, msgs->len, 0);
1183 		if (err) {
1184 			ret = -err;
1185 			goto fail;
1186 		}
1187 		msgs++;
1188 		num--;
1189 		ret++;
1190 	}
1191 
1192 	if (num > 1) {
1193 		cmd = msgs->buf;
1194 		cmdlen = msgs->len;
1195 		msgs++;
1196 		num--;
1197 		ret++;
1198 	}
1199 
1200 	op = (msgs->flags & I2C_M_RD) ?
1201 	    I2C_OP_READ_WITH_STOP : I2C_OP_WRITE_WITH_STOP;
1202 	err = iic_exec(&adap->ic, op, msgs->addr, cmd, cmdlen,
1203 	    msgs->buf, msgs->len, 0);
1204 	if (err) {
1205 		ret = -err;
1206 		goto fail;
1207 	}
1208 	msgs++;
1209 	ret++;
1210 
1211 fail:
1212 	iic_release_bus(&adap->ic, 0);
1213 
1214 	return ret;
1215 }
1216 
1217 int
1218 __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1219 {
1220 	int ret, retries;
1221 
1222 	retries = adap->retries;
1223 retry:
1224 	if (adap->algo)
1225 		ret = adap->algo->master_xfer(adap, msgs, num);
1226 	else
1227 		ret = i2c_master_xfer(adap, msgs, num);
1228 	if (ret == -EAGAIN && retries > 0) {
1229 		retries--;
1230 		goto retry;
1231 	}
1232 
1233 	return ret;
1234 }
1235 
1236 int
1237 i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1238 {
1239 	int ret;
1240 
1241 	if (adap->lock_ops)
1242 		adap->lock_ops->lock_bus(adap, 0);
1243 
1244 	ret = __i2c_transfer(adap, msgs, num);
1245 
1246 	if (adap->lock_ops)
1247 		adap->lock_ops->unlock_bus(adap, 0);
1248 
1249 	return ret;
1250 }
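/*
 * Usage sketch (illustrative): a register read on a DDC-style device is
 * commonly expressed as a write of the register offset followed by a
 * read; i2c_master_xfer() above folds such a trailing write+read pair
 * into a single iic_exec() with a stop.  The address and register below
 * are hypothetical.
 *
 *	uint8_t reg = 0x00, val;
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
 *	};
 *
 *	if (i2c_transfer(adap, msgs, 2) != 2)
 *		... error ...
 */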
1251 
1252 int
1253 i2c_bb_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
1254 {
1255 	struct i2c_algo_bit_data *algo = adap->algo_data;
1256 	struct i2c_adapter bb;
1257 
1258 	memset(&bb, 0, sizeof(bb));
1259 	bb.ic = algo->ic;
1260 	bb.retries = adap->retries;
1261 	return i2c_master_xfer(&bb, msgs, num);
1262 }
1263 
1264 uint32_t
1265 i2c_bb_functionality(struct i2c_adapter *adap)
1266 {
1267 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1268 }
1269 
1270 struct i2c_algorithm i2c_bit_algo = {
1271 	.master_xfer = i2c_bb_master_xfer,
1272 	.functionality = i2c_bb_functionality
1273 };
1274 
1275 int
1276 i2c_bit_add_bus(struct i2c_adapter *adap)
1277 {
1278 	adap->algo = &i2c_bit_algo;
1279 	adap->retries = 3;
1280 
1281 	return 0;
1282 }
1283 
1284 #if defined(__amd64__) || defined(__i386__)
1285 
1286 /*
1287  * This is a minimal implementation of the Linux vga_get/vga_put
1288  * interface.  In all likelihood, it will only work for inteldrm(4) as
1289  * it assumes that if there is another active VGA device in the
1290  * system, it is sitting behind a PCI bridge.
1291  */
1292 
1293 extern int pci_enumerate_bus(struct pci_softc *,
1294     int (*)(struct pci_attach_args *), struct pci_attach_args *);
1295 
1296 pcitag_t vga_bridge_tag;
1297 int vga_bridge_disabled;
1298 
1299 int
1300 vga_disable_bridge(struct pci_attach_args *pa)
1301 {
1302 	pcireg_t bhlc, bc;
1303 
1304 	if (pa->pa_domain != 0)
1305 		return 0;
1306 
1307 	bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
1308 	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
1309 		return 0;
1310 
1311 	bc = pci_conf_read(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL);
1312 	if ((bc & PPB_BC_VGA_ENABLE) == 0)
1313 		return 0;
1314 	bc &= ~PPB_BC_VGA_ENABLE;
1315 	pci_conf_write(pa->pa_pc, pa->pa_tag, PPB_REG_BRIDGECONTROL, bc);
1316 
1317 	vga_bridge_tag = pa->pa_tag;
1318 	vga_bridge_disabled = 1;
1319 
1320 	return 1;
1321 }
1322 
1323 void
1324 vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
1325 {
1326 	if (pdev->pci->sc_bridgetag != NULL)
1327 		return;
1328 	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
1329 }
1330 
1331 void
1332 vga_put(struct pci_dev *pdev, int rsrc)
1333 {
1334 	pcireg_t bc;
1335 
1336 	if (!vga_bridge_disabled)
1337 		return;
1338 
1339 	bc = pci_conf_read(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL);
1340 	bc |= PPB_BC_VGA_ENABLE;
1341 	pci_conf_write(pdev->pc, vga_bridge_tag, PPB_REG_BRIDGECONTROL, bc);
1342 
1343 	vga_bridge_disabled = 0;
1344 }
1345 
1346 #endif
1347 
1348 /*
1349  * ACPI types and interfaces.
1350  */
1351 
1352 #ifdef __HAVE_ACPI
1353 #include "acpi.h"
1354 #endif
1355 
1356 #if NACPI > 0
1357 
1358 #include <dev/acpi/acpireg.h>
1359 #include <dev/acpi/acpivar.h>
1360 #include <dev/acpi/amltypes.h>
1361 #include <dev/acpi/dsdt.h>
1362 
1363 acpi_status
1364 acpi_get_table(const char *sig, int instance,
1365     struct acpi_table_header **hdr)
1366 {
1367 	struct acpi_softc *sc = acpi_softc;
1368 	struct acpi_q *entry;
1369 
1370 	KASSERT(instance == 1);
1371 
1372 	if (sc == NULL)
1373 		return AE_NOT_FOUND;
1374 
1375 	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
1376 		if (memcmp(entry->q_table, sig, strlen(sig)) == 0) {
1377 			*hdr = entry->q_table;
1378 			return 0;
1379 		}
1380 	}
1381 
1382 	return AE_NOT_FOUND;
1383 }
1384 
1385 void
1386 acpi_put_table(struct acpi_table_header *hdr)
1387 {
1388 }
1389 
1390 acpi_status
1391 acpi_get_handle(acpi_handle node, const char *name, acpi_handle *rnode)
1392 {
1393 	node = aml_searchname(node, name);
1394 	if (node == NULL)
1395 		return AE_NOT_FOUND;
1396 
1397 	*rnode = node;
1398 	return 0;
1399 }
1400 
1401 acpi_status
1402 acpi_get_name(acpi_handle node, int type, struct acpi_buffer *buffer)
1403 {
1404 	KASSERT(buffer->length != ACPI_ALLOCATE_BUFFER);
1405 	KASSERT(type == ACPI_FULL_PATHNAME);
1406 	strlcpy(buffer->pointer, aml_nodename(node), buffer->length);
1407 	return 0;
1408 }
1409 
1410 acpi_status
1411 acpi_evaluate_object(acpi_handle node, const char *name,
1412     struct acpi_object_list *params, struct acpi_buffer *result)
1413 {
1414 	struct aml_value args[4], res;
1415 	union acpi_object *obj;
1416 	uint8_t *data;
1417 	int i;
1418 
1419 	KASSERT(params->count <= nitems(args));
1420 
1421 	for (i = 0; i < params->count; i++) {
1422 		args[i].type = params->pointer[i].type;
1423 		switch (args[i].type) {
1424 		case AML_OBJTYPE_INTEGER:
1425 			args[i].v_integer = params->pointer[i].integer.value;
1426 			break;
1427 		case AML_OBJTYPE_BUFFER:
1428 			args[i].length = params->pointer[i].buffer.length;
1429 			args[i].v_buffer = params->pointer[i].buffer.pointer;
1430 			break;
1431 		default:
1432 			printf("%s: arg type 0x%02x", __func__, args[i].type);
1433 			return AE_BAD_PARAMETER;
1434 		}
1435 	}
1436 
1437 	if (name) {
1438 		node = aml_searchname(node, name);
1439 		if (node == NULL)
1440 			return AE_NOT_FOUND;
1441 	}
1442 	if (aml_evalnode(acpi_softc, node, params->count, args, &res)) {
1443 		aml_freevalue(&res);
1444 		return AE_ERROR;
1445 	}
1446 
1447 	KASSERT(result->length == ACPI_ALLOCATE_BUFFER);
1448 
1449 	result->length = sizeof(union acpi_object);
1450 	switch (res.type) {
1451 	case AML_OBJTYPE_BUFFER:
1452 		result->length += res.length;
1453 		result->pointer = malloc(result->length, M_DRM, M_WAITOK);
1454 		obj = (union acpi_object *)result->pointer;
1455 		data = (uint8_t *)(obj + 1);
1456 		obj->type = res.type;
1457 		obj->buffer.length = res.length;
1458 		obj->buffer.pointer = data;
1459 		memcpy(data, res.v_buffer, res.length);
1460 		break;
1461 	default:
1462 		printf("%s: return type 0x%02x", __func__, res.type);
1463 		aml_freevalue(&res);
1464 		return AE_ERROR;
1465 	}
1466 
1467 	aml_freevalue(&res);
1468 	return 0;
1469 }
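/*
 * Usage sketch (hypothetical, illustrative only): this shim supports
 * integer and buffer arguments and a buffer return allocated with
 * ACPI_ALLOCATE_BUFFER, which the caller frees.  "handle" and "MTHD"
 * stand in for a real node and method name.
 *
 *	union acpi_object args[1] = {
 *		{ .integer = { .type = ACPI_TYPE_INTEGER, .value = 1 } },
 *	};
 *	struct acpi_object_list params = { .count = 1, .pointer = args };
 *	struct acpi_buffer result = { .length = ACPI_ALLOCATE_BUFFER };
 *
 *	if (acpi_evaluate_object(handle, "MTHD", &params, &result))
 *		return;
 *	... use result.pointer ...
 *	free(result.pointer, M_DRM, result.length);
 */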
1470 
1471 SLIST_HEAD(, notifier_block) drm_linux_acpi_notify_list =
1472 	SLIST_HEAD_INITIALIZER(drm_linux_acpi_notify_list);
1473 
1474 int
1475 drm_linux_acpi_notify(struct aml_node *node, int notify, void *arg)
1476 {
1477 	struct acpi_bus_event event;
1478 	struct notifier_block *nb;
1479 
1480 	event.device_class = ACPI_VIDEO_CLASS;
1481 	event.type = notify;
1482 
1483 	SLIST_FOREACH(nb, &drm_linux_acpi_notify_list, link)
1484 		nb->notifier_call(nb, 0, &event);
1485 	return 0;
1486 }
1487 
1488 int
1489 register_acpi_notifier(struct notifier_block *nb)
1490 {
1491 	SLIST_INSERT_HEAD(&drm_linux_acpi_notify_list, nb, link);
1492 	return 0;
1493 }
1494 
1495 int
1496 unregister_acpi_notifier(struct notifier_block *nb)
1497 {
1498 	struct notifier_block *tmp;
1499 
1500 	SLIST_FOREACH(tmp, &drm_linux_acpi_notify_list, link) {
1501 		if (tmp == nb) {
1502 			SLIST_REMOVE(&drm_linux_acpi_notify_list, nb,
1503 			    notifier_block, link);
1504 			return 0;
1505 		}
1506 	}
1507 
1508 	return -ENOENT;
1509 }
1510 
1511 const char *
1512 acpi_format_exception(acpi_status status)
1513 {
1514 	switch (status) {
1515 	case AE_NOT_FOUND:
1516 		return "not found";
1517 	case AE_BAD_PARAMETER:
1518 		return "bad parameter";
1519 	default:
1520 		return "unknown";
1521 	}
1522 }
1523 
1524 #endif
1525 
1526 SLIST_HEAD(,backlight_device) backlight_device_list =
1527     SLIST_HEAD_INITIALIZER(backlight_device_list);
1528 
1529 void
1530 backlight_do_update_status(void *arg)
1531 {
1532 	backlight_update_status(arg);
1533 }
1534 
1535 struct backlight_device *
1536 backlight_device_register(const char *name, void *kdev, void *data,
1537     const struct backlight_ops *ops, const struct backlight_properties *props)
1538 {
1539 	struct backlight_device *bd;
1540 
1541 	bd = malloc(sizeof(*bd), M_DRM, M_WAITOK);
1542 	bd->ops = ops;
1543 	bd->props = *props;
1544 	bd->data = data;
1545 
1546 	task_set(&bd->task, backlight_do_update_status, bd);
1547 
1548 	SLIST_INSERT_HEAD(&backlight_device_list, bd, next);
1549 	bd->name = name;
1550 
1551 	return bd;
1552 }
1553 
1554 void
1555 backlight_device_unregister(struct backlight_device *bd)
1556 {
1557 	SLIST_REMOVE(&backlight_device_list, bd, backlight_device, next);
1558 	free(bd, M_DRM, sizeof(*bd));
1559 }
1560 
1561 void
1562 backlight_schedule_update_status(struct backlight_device *bd)
1563 {
1564 	task_add(systq, &bd->task);
1565 }
1566 
1567 int
1568 backlight_enable(struct backlight_device *bd)
1569 {
1570 	if (bd == NULL)
1571 		return 0;
1572 
1573 	bd->props.power = FB_BLANK_UNBLANK;
1574 
1575 	return bd->ops->update_status(bd);
1576 }
1577 
1578 int
1579 backlight_disable(struct backlight_device *bd)
1580 {
1581 	if (bd == NULL)
1582 		return 0;
1583 
1584 	bd->props.power = FB_BLANK_POWERDOWN;
1585 
1586 	return bd->ops->update_status(bd);
1587 }
1588 
1589 struct backlight_device *
1590 backlight_device_get_by_name(const char *name)
1591 {
1592 	struct backlight_device *bd;
1593 
1594 	SLIST_FOREACH(bd, &backlight_device_list, next) {
1595 		if (strcmp(name, bd->name) == 0)
1596 			return bd;
1597 	}
1598 
1599 	return NULL;
1600 }
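/*
 * Usage sketch (illustrative): a display driver registers a device with
 * its own ops and lets the task set up above push status updates out of
 * interrupt context.  "myops", "my_update_status" and "sc" are
 * hypothetical names.
 *
 *	static const struct backlight_ops myops = {
 *		.update_status = my_update_status,
 *	};
 *	struct backlight_properties props = { .max_brightness = 255 };
 *	struct backlight_device *bd;
 *
 *	bd = backlight_device_register("mybl", NULL, sc, &myops, &props);
 *	bd->props.brightness = 128;
 *	backlight_schedule_update_status(bd);
 */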
1601 
1602 struct drvdata {
1603 	struct device *dev;
1604 	void *data;
1605 	SLIST_ENTRY(drvdata) next;
1606 };
1607 
1608 SLIST_HEAD(,drvdata) drvdata_list = SLIST_HEAD_INITIALIZER(drvdata_list);
1609 
1610 void
1611 dev_set_drvdata(struct device *dev, void *data)
1612 {
1613 	struct drvdata *drvdata;
1614 
1615 	SLIST_FOREACH(drvdata, &drvdata_list, next) {
1616 		if (drvdata->dev == dev) {
1617 			drvdata->data = data;
1618 			return;
1619 		}
1620 	}
1621 
1622 	if (data == NULL)
1623 		return;
1624 
1625 	drvdata = malloc(sizeof(*drvdata), M_DRM, M_WAITOK);
1626 	drvdata->dev = dev;
1627 	drvdata->data = data;
1628 
1629 	SLIST_INSERT_HEAD(&drvdata_list, drvdata, next);
1630 }
1631 
1632 void *
1633 dev_get_drvdata(struct device *dev)
1634 {
1635 	struct drvdata *drvdata;
1636 
1637 	SLIST_FOREACH(drvdata, &drvdata_list, next) {
1638 		if (drvdata->dev == dev)
1639 			return drvdata->data;
1640 	}
1641 
1642 	return NULL;
1643 }
1644 
1645 void
1646 drm_sysfs_hotplug_event(struct drm_device *dev)
1647 {
1648 	knote_locked(&dev->note, NOTE_CHANGE);
1649 }
1650 
1651 void
1652 drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
1653 {
1654 	knote_locked(&connector->dev->note, NOTE_CHANGE);
1655 }
1656 
1657 void
1658 drm_sysfs_connector_status_event(struct drm_connector *connector,
1659     struct drm_property *property)
1660 {
1661 	STUB();
1662 }
1663 
1664 void
1665 drm_sysfs_connector_property_event(struct drm_connector *connector,
1666     struct drm_property *property)
1667 {
1668 	STUB();
1669 }
1670 
1671 struct dma_fence *
1672 dma_fence_get(struct dma_fence *fence)
1673 {
1674 	if (fence)
1675 		kref_get(&fence->refcount);
1676 	return fence;
1677 }
1678 
1679 struct dma_fence *
1680 dma_fence_get_rcu(struct dma_fence *fence)
1681 {
1682 	if (fence)
1683 		kref_get(&fence->refcount);
1684 	return fence;
1685 }
1686 
1687 struct dma_fence *
1688 dma_fence_get_rcu_safe(struct dma_fence **dfp)
1689 {
1690 	struct dma_fence *fence;
1691 	if (dfp == NULL)
1692 		return NULL;
1693 	fence = *dfp;
1694 	if (fence)
1695 		kref_get(&fence->refcount);
1696 	return fence;
1697 }
1698 
1699 void
1700 dma_fence_release(struct kref *ref)
1701 {
1702 	struct dma_fence *fence = container_of(ref, struct dma_fence, refcount);
1703 	if (fence->ops && fence->ops->release)
1704 		fence->ops->release(fence);
1705 	else
1706 		free(fence, M_DRM, 0);
1707 }
1708 
1709 void
1710 dma_fence_put(struct dma_fence *fence)
1711 {
1712 	if (fence)
1713 		kref_put(&fence->refcount, dma_fence_release);
1714 }
1715 
1716 int
1717 dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp)
1718 {
1719 	struct dma_fence_cb *cur, *tmp;
1720 	struct list_head cb_list;
1721 
1722 	if (fence == NULL)
1723 		return -EINVAL;
1724 
1725 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1726 		return -EINVAL;
1727 
1728 	list_replace(&fence->cb_list, &cb_list);
1729 
1730 	fence->timestamp = timestamp;
1731 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
1732 
1733 	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
1734 		INIT_LIST_HEAD(&cur->node);
1735 		cur->func(fence, cur);
1736 	}
1737 
1738 	return 0;
1739 }
1740 
1741 int
1742 dma_fence_signal(struct dma_fence *fence)
1743 {
1744 	int r;
1745 
1746 	if (fence == NULL)
1747 		return -EINVAL;
1748 
1749 	mtx_enter(fence->lock);
1750 	r = dma_fence_signal_timestamp_locked(fence, ktime_get());
1751 	mtx_leave(fence->lock);
1752 
1753 	return r;
1754 }
1755 
1756 int
1757 dma_fence_signal_locked(struct dma_fence *fence)
1758 {
1759 	if (fence == NULL)
1760 		return -EINVAL;
1761 
1762 	return dma_fence_signal_timestamp_locked(fence, ktime_get());
1763 }
1764 
1765 int
1766 dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
1767 {
1768 	int r;
1769 
1770 	if (fence == NULL)
1771 		return -EINVAL;
1772 
1773 	mtx_enter(fence->lock);
1774 	r = dma_fence_signal_timestamp_locked(fence, timestamp);
1775 	mtx_leave(fence->lock);
1776 
1777 	return r;
1778 }
1779 
1780 bool
1781 dma_fence_is_signaled(struct dma_fence *fence)
1782 {
1783 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1784 		return true;
1785 
1786 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1787 		dma_fence_signal(fence);
1788 		return true;
1789 	}
1790 
1791 	return false;
1792 }
1793 
1794 bool
1795 dma_fence_is_signaled_locked(struct dma_fence *fence)
1796 {
1797 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1798 		return true;
1799 
1800 	if (fence->ops->signaled && fence->ops->signaled(fence)) {
1801 		dma_fence_signal_locked(fence);
1802 		return true;
1803 	}
1804 
1805 	return false;
1806 }
1807 
1808 ktime_t
1809 dma_fence_timestamp(struct dma_fence *fence)
1810 {
1811 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1812 		while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
1813 			CPU_BUSY_CYCLE();
1814 		return fence->timestamp;
1815 	} else {
1816 		return ktime_get();
1817 	}
1818 }
1819 
1820 long
1821 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
1822 {
1823 	if (timeout < 0)
1824 		return -EINVAL;
1825 
1826 	if (fence->ops->wait)
1827 		return fence->ops->wait(fence, intr, timeout);
1828 	else
1829 		return dma_fence_default_wait(fence, intr, timeout);
1830 }
1831 
1832 long
1833 dma_fence_wait(struct dma_fence *fence, bool intr)
1834 {
1835 	long ret;
1836 
1837 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
1838 	if (ret < 0)
1839 		return ret;
1840 
1841 	return 0;
1842 }
1843 
1844 void
1845 dma_fence_enable_sw_signaling(struct dma_fence *fence)
1846 {
1847 	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
1848 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
1849 	    fence->ops->enable_signaling) {
1850 		mtx_enter(fence->lock);
1851 		if (!fence->ops->enable_signaling(fence))
1852 			dma_fence_signal_locked(fence);
1853 		mtx_leave(fence->lock);
1854 	}
1855 }
1856 
1857 void
1858 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
1859     struct mutex *lock, uint64_t context, uint64_t seqno)
1860 {
1861 	fence->ops = ops;
1862 	fence->lock = lock;
1863 	fence->context = context;
1864 	fence->seqno = seqno;
1865 	fence->flags = 0;
1866 	fence->error = 0;
1867 	kref_init(&fence->refcount);
1868 	INIT_LIST_HEAD(&fence->cb_list);
1869 }
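/*
 * Lifecycle sketch (illustrative): a minimal fence provider supplies at
 * least the two name callbacks (as the stub ops later in this file do),
 * initializes the fence with a mutex of its own and a context, and
 * signals it when the backing work completes.  "myfence_ops",
 * "myfence_driver_name", "myfence_timeline_name" and "sc" are
 * hypothetical names.
 *
 *	static const struct dma_fence_ops myfence_ops = {
 *		.get_driver_name = myfence_driver_name,
 *		.get_timeline_name = myfence_timeline_name,
 *	};
 *
 *	dma_fence_init(&sc->fence, &myfence_ops, &sc->fence_mtx,
 *	    dma_fence_context_alloc(1), ++sc->seqno);
 *	...
 *	dma_fence_signal(&sc->fence);
 *	dma_fence_put(&sc->fence);
 */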
1870 
1871 int
1872 dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
1873     dma_fence_func_t func)
1874 {
1875 	int ret = 0;
1876 	bool was_set;
1877 
1878 	if (WARN_ON(!fence || !func))
1879 		return -EINVAL;
1880 
1881 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1882 		INIT_LIST_HEAD(&cb->node);
1883 		return -ENOENT;
1884 	}
1885 
1886 	mtx_enter(fence->lock);
1887 
1888 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
1889 
1890 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1891 		ret = -ENOENT;
1892 	else if (!was_set && fence->ops->enable_signaling) {
1893 		if (!fence->ops->enable_signaling(fence)) {
1894 			dma_fence_signal_locked(fence);
1895 			ret = -ENOENT;
1896 		}
1897 	}
1898 
1899 	if (!ret) {
1900 		cb->func = func;
1901 		list_add_tail(&cb->node, &fence->cb_list);
1902 	} else
1903 		INIT_LIST_HEAD(&cb->node);
1904 	mtx_leave(fence->lock);
1905 
1906 	return ret;
1907 }
1908 
1909 bool
1910 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
1911 {
1912 	bool ret;
1913 
1914 	mtx_enter(fence->lock);
1915 
1916 	ret = !list_empty(&cb->node);
1917 	if (ret)
1918 		list_del_init(&cb->node);
1919 
1920 	mtx_leave(fence->lock);
1921 
1922 	return ret;
1923 }
1924 
1925 static atomic64_t drm_fence_context_count = ATOMIC64_INIT(1);
1926 
1927 uint64_t
1928 dma_fence_context_alloc(unsigned int num)
1929 {
1930 	return atomic64_add_return(num, &drm_fence_context_count) - num;
1931 }
1932 
1933 struct default_wait_cb {
1934 	struct dma_fence_cb base;
1935 	struct proc *proc;
1936 };
1937 
1938 static void
1939 dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1940 {
1941 	struct default_wait_cb *wait =
1942 	    container_of(cb, struct default_wait_cb, base);
1943 	wake_up_process(wait->proc);
1944 }
1945 
1946 long
1947 dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
1948 {
1949 	long ret = timeout ? timeout : 1;
1950 	unsigned long end;
1951 	int err;
1952 	struct default_wait_cb cb;
1953 	bool was_set;
1954 
1955 	KASSERT(timeout <= INT_MAX);
1956 
1957 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1958 		return ret;
1959 
1960 	mtx_enter(fence->lock);
1961 
1962 	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1963 	    &fence->flags);
1964 
1965 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1966 		goto out;
1967 
1968 	if (!was_set && fence->ops->enable_signaling) {
1969 		if (!fence->ops->enable_signaling(fence)) {
1970 			dma_fence_signal_locked(fence);
1971 			goto out;
1972 		}
1973 	}
1974 
1975 	if (timeout == 0) {
1976 		ret = 0;
1977 		goto out;
1978 	}
1979 
1980 	cb.base.func = dma_fence_default_wait_cb;
1981 	cb.proc = curproc;
1982 	list_add(&cb.base.node, &fence->cb_list);
1983 
1984 	end = jiffies + timeout;
1985 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
1986 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1987 			break;
1988 		err = msleep(curproc, fence->lock, intr ? PCATCH : 0,
1989 		    "dmafence", ret);
1990 		if (err == EINTR || err == ERESTART) {
1991 			ret = -ERESTARTSYS;
1992 			break;
1993 		}
1994 	}
1995 
1996 	if (!list_empty(&cb.base.node))
1997 		list_del(&cb.base.node);
1998 out:
1999 	mtx_leave(fence->lock);
2000 
2001 	return ret;
2002 }
2003 
2004 static bool
2005 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
2006     uint32_t *idx)
2007 {
2008 	int i;
2009 
2010 	for (i = 0; i < count; ++i) {
2011 		struct dma_fence *fence = fences[i];
2012 		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
2013 			if (idx)
2014 				*idx = i;
2015 			return true;
2016 		}
2017 	}
2018 	return false;
2019 }
2020 
2021 long
2022 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
2023     bool intr, long timeout, uint32_t *idx)
2024 {
2025 	struct default_wait_cb *cb;
2026 	long ret = timeout;
2027 	unsigned long end;
2028 	int i, err;
2029 
2030 	KASSERT(timeout <= INT_MAX);
2031 
2032 	if (timeout == 0) {
2033 		for (i = 0; i < count; i++) {
2034 			if (dma_fence_is_signaled(fences[i])) {
2035 				if (idx)
2036 					*idx = i;
2037 				return 1;
2038 			}
2039 		}
2040 		return 0;
2041 	}
2042 
2043 	cb = mallocarray(count, sizeof(*cb), M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2044 	if (cb == NULL)
2045 		return -ENOMEM;
2046 
2047 	for (i = 0; i < count; i++) {
2048 		struct dma_fence *fence = fences[i];
2049 		cb[i].proc = curproc;
2050 		if (dma_fence_add_callback(fence, &cb[i].base,
2051 		    dma_fence_default_wait_cb)) {
2052 			if (idx)
2053 				*idx = i;
2054 			goto cb_cleanup;
2055 		}
2056 	}
2057 
2058 	end = jiffies + timeout;
2059 	for (ret = timeout; ret > 0; ret = MAX(0, end - jiffies)) {
2060 		if (dma_fence_test_signaled_any(fences, count, idx))
2061 			break;
2062 		err = tsleep(curproc, intr ? PCATCH : 0, "dfwat", ret);
2063 		if (err == EINTR || err == ERESTART) {
2064 			ret = -ERESTARTSYS;
2065 			break;
2066 		}
2067 	}
2068 
2069 cb_cleanup:
2070 	while (i-- > 0)
2071 		dma_fence_remove_callback(fences[i], &cb[i].base);
2072 	free(cb, M_DRM, count * sizeof(*cb));
2073 	return ret;
2074 }
2075 
2076 void
dma_fence_set_deadline(struct dma_fence * f,ktime_t t)2077 dma_fence_set_deadline(struct dma_fence *f, ktime_t t)
2078 {
2079 	if (f->ops->set_deadline == NULL)
2080 		return;
2081 	if (dma_fence_is_signaled(f) == false)
2082 		f->ops->set_deadline(f, t);
2083 }
2084 
2085 static struct dma_fence dma_fence_stub;
2086 static struct mutex dma_fence_stub_mtx = MUTEX_INITIALIZER(IPL_TTY);
2087 
2088 static const char *
dma_fence_stub_get_name(struct dma_fence * fence)2089 dma_fence_stub_get_name(struct dma_fence *fence)
2090 {
2091 	return "stub";
2092 }
2093 
2094 static const struct dma_fence_ops dma_fence_stub_ops = {
2095 	.get_driver_name = dma_fence_stub_get_name,
2096 	.get_timeline_name = dma_fence_stub_get_name,
2097 };
2098 
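/*
 * Return the shared, already-signaled stub fence, initializing it on
 * first use.  Handy when an API needs "a fence that is already done"
 * without allocating one; every call returns a new reference.
 * Hypothetical usage sketch (not from this file):
 *
 *	struct dma_fence *f = dma_fence_get_stub();
 *	...
 *	dma_fence_put(f);
 */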
2099 struct dma_fence *
dma_fence_get_stub(void)2100 dma_fence_get_stub(void)
2101 {
2102 	mtx_enter(&dma_fence_stub_mtx);
2103 	if (dma_fence_stub.ops == NULL) {
2104 		dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
2105 		    &dma_fence_stub_mtx, 0, 0);
2106 		dma_fence_signal_locked(&dma_fence_stub);
2107 	}
2108 	mtx_leave(&dma_fence_stub_mtx);
2109 
2110 	return dma_fence_get(&dma_fence_stub);
2111 }
2112 
2113 struct dma_fence *
dma_fence_allocate_private_stub(ktime_t ts)2114 dma_fence_allocate_private_stub(ktime_t ts)
2115 {
2116 	struct dma_fence *f = malloc(sizeof(*f), M_DRM,
2117 	    M_ZERO | M_WAITOK | M_CANFAIL);
2118 	if (f == NULL)
2119 		return NULL;
2120 	dma_fence_init(f, &dma_fence_stub_ops, &dma_fence_stub_mtx, 0, 0);
2121 	dma_fence_signal_timestamp(f, ts);
2122 	return f;
2123 }
2124 
2125 static const char *
dma_fence_array_get_driver_name(struct dma_fence * fence)2126 dma_fence_array_get_driver_name(struct dma_fence *fence)
2127 {
2128 	return "dma_fence_array";
2129 }
2130 
2131 static const char *
dma_fence_array_get_timeline_name(struct dma_fence * fence)2132 dma_fence_array_get_timeline_name(struct dma_fence *fence)
2133 {
2134 	return "unbound";
2135 }
2136 
2137 static void
irq_dma_fence_array_work(void * arg)2138 irq_dma_fence_array_work(void *arg)
2139 {
2140 	struct dma_fence_array *dfa = (struct dma_fence_array *)arg;
2141 	dma_fence_signal(&dfa->base);
2142 	dma_fence_put(&dfa->base);
2143 }
2144 
2145 static void
dma_fence_array_cb_func(struct dma_fence * f,struct dma_fence_cb * cb)2146 dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
2147 {
2148 	struct dma_fence_array_cb *array_cb =
2149 	    container_of(cb, struct dma_fence_array_cb, cb);
2150 	struct dma_fence_array *dfa = array_cb->array;
2151 
2152 	if (atomic_dec_and_test(&dfa->num_pending))
2153 		timeout_add(&dfa->to, 1);
2154 	else
2155 		dma_fence_put(&dfa->base);
2156 }
2157 
2158 static bool
dma_fence_array_enable_signaling(struct dma_fence * fence)2159 dma_fence_array_enable_signaling(struct dma_fence *fence)
2160 {
2161 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2162 	struct dma_fence_array_cb *cb = (void *)(&dfa[1]);
2163 	int i;
2164 
2165 	for (i = 0; i < dfa->num_fences; ++i) {
2166 		cb[i].array = dfa;
2167 		dma_fence_get(&dfa->base);
2168 		if (dma_fence_add_callback(dfa->fences[i], &cb[i].cb,
2169 		    dma_fence_array_cb_func)) {
2170 			dma_fence_put(&dfa->base);
2171 			if (atomic_dec_and_test(&dfa->num_pending))
2172 				return false;
2173 		}
2174 	}
2175 
2176 	return true;
2177 }
2178 
2179 static bool
dma_fence_array_signaled(struct dma_fence * fence)2180 dma_fence_array_signaled(struct dma_fence *fence)
2181 {
2182 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2183 
2184 	return atomic_read(&dfa->num_pending) <= 0;
2185 }
2186 
2187 static void
dma_fence_array_release(struct dma_fence * fence)2188 dma_fence_array_release(struct dma_fence *fence)
2189 {
2190 	struct dma_fence_array *dfa = to_dma_fence_array(fence);
2191 	int i;
2192 
2193 	for (i = 0; i < dfa->num_fences; ++i)
2194 		dma_fence_put(dfa->fences[i]);
2195 
2196 	free(dfa->fences, M_DRM, 0);
2197 	dma_fence_free(fence);
2198 }
2199 
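/*
 * Build a fence array aggregating "num_fences" fences under a single
 * context/seqno.  With signal_on_any the array signals once any
 * member signals, otherwise only after all of them have; the actual
 * signaling is deferred to a timeout(9) handler so it never runs in
 * the member fence's callback context.  The array takes ownership of
 * the "fences" pointer array and frees it from its release hook.
 */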
2200 struct dma_fence_array *
dma_fence_array_create(int num_fences,struct dma_fence ** fences,u64 context,unsigned seqno,bool signal_on_any)2201 dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context,
2202     unsigned seqno, bool signal_on_any)
2203 {
2204 	struct dma_fence_array *dfa = malloc(sizeof(*dfa) +
2205 	    (num_fences * sizeof(struct dma_fence_array_cb)),
2206 	    M_DRM, M_WAITOK|M_CANFAIL|M_ZERO);
2207 	if (dfa == NULL)
2208 		return NULL;
2209 
2210 	mtx_init(&dfa->lock, IPL_TTY);
2211 	dma_fence_init(&dfa->base, &dma_fence_array_ops, &dfa->lock,
2212 	    context, seqno);
2213 	timeout_set(&dfa->to, irq_dma_fence_array_work, dfa);
2214 
2215 	dfa->num_fences = num_fences;
2216 	atomic_set(&dfa->num_pending, signal_on_any ? 1 : num_fences);
2217 	dfa->fences = fences;
2218 
2219 	return dfa;
2220 }
2221 
2222 struct dma_fence *
dma_fence_array_first(struct dma_fence * f)2223 dma_fence_array_first(struct dma_fence *f)
2224 {
2225 	struct dma_fence_array *dfa;
2226 
2227 	if (f == NULL)
2228 		return NULL;
2229 
2230 	if ((dfa = to_dma_fence_array(f)) == NULL)
2231 		return f;
2232 
2233 	if (dfa->num_fences > 0)
2234 		return dfa->fences[0];
2235 
2236 	return NULL;
2237 }
2238 
2239 struct dma_fence *
dma_fence_array_next(struct dma_fence * f,unsigned int i)2240 dma_fence_array_next(struct dma_fence *f, unsigned int i)
2241 {
2242 	struct dma_fence_array *dfa;
2243 
2244 	if (f == NULL)
2245 		return NULL;
2246 
2247 	if ((dfa = to_dma_fence_array(f)) == NULL)
2248 		return NULL;
2249 
2250 	if (i < dfa->num_fences)
2251 		return dfa->fences[i];
2252 
2253 	return NULL;
2254 }
2255 
2256 const struct dma_fence_ops dma_fence_array_ops = {
2257 	.get_driver_name = dma_fence_array_get_driver_name,
2258 	.get_timeline_name = dma_fence_array_get_timeline_name,
2259 	.enable_signaling = dma_fence_array_enable_signaling,
2260 	.signaled = dma_fence_array_signaled,
2261 	.release = dma_fence_array_release,
2262 };
2263 
2264 int
dma_fence_chain_find_seqno(struct dma_fence ** df,uint64_t seqno)2265 dma_fence_chain_find_seqno(struct dma_fence **df, uint64_t seqno)
2266 {
2267 	struct dma_fence_chain *chain;
2268 	struct dma_fence *fence;
2269 
2270 	if (seqno == 0)
2271 		return 0;
2272 
2273 	if ((chain = to_dma_fence_chain(*df)) == NULL)
2274 		return -EINVAL;
2275 
2276 	fence = &chain->base;
2277 	if (fence->seqno < seqno)
2278 		return -EINVAL;
2279 
2280 	dma_fence_chain_for_each(*df, fence) {
2281 		if ((*df)->context != fence->context)
2282 			break;
2283 
2284 		chain = to_dma_fence_chain(*df);
2285 		if (chain->prev_seqno < seqno)
2286 			break;
2287 	}
2288 	dma_fence_put(fence);
2289 
2290 	return 0;
2291 }
2292 
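/*
 * Link a new chain node in front of "prev".  If prev is itself a
 * chain node and seqno advances its timeline, the new node inherits
 * prev's context and records prev->seqno as prev_seqno; otherwise a
 * fresh fence context is allocated for the node.
 */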
2293 void
dma_fence_chain_init(struct dma_fence_chain * chain,struct dma_fence * prev,struct dma_fence * fence,uint64_t seqno)2294 dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
2295     struct dma_fence *fence, uint64_t seqno)
2296 {
2297 	uint64_t context;
2298 
2299 	chain->fence = fence;
2300 	chain->prev = prev;
2301 	mtx_init(&chain->lock, IPL_TTY);
2302 
2303 	/* if prev is a chain */
2304 	if (to_dma_fence_chain(prev) != NULL) {
2305 		if (__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
2306 			chain->prev_seqno = prev->seqno;
2307 			context = prev->context;
2308 		} else {
2309 			chain->prev_seqno = 0;
2310 			context = dma_fence_context_alloc(1);
2311 			seqno = prev->seqno;
2312 		}
2313 	} else {
2314 		chain->prev_seqno = 0;
2315 		context = dma_fence_context_alloc(1);
2316 	}
2317 
2318 	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock,
2319 	    context, seqno);
2320 }
2321 
2322 static const char *
dma_fence_chain_get_driver_name(struct dma_fence * fence)2323 dma_fence_chain_get_driver_name(struct dma_fence *fence)
2324 {
2325 	return "dma_fence_chain";
2326 }
2327 
2328 static const char *
dma_fence_chain_get_timeline_name(struct dma_fence * fence)2329 dma_fence_chain_get_timeline_name(struct dma_fence *fence)
2330 {
2331 	return "unbound";
2332 }
2333 
2334 static bool dma_fence_chain_enable_signaling(struct dma_fence *);
2335 
2336 static void
dma_fence_chain_timo(void * arg)2337 dma_fence_chain_timo(void *arg)
2338 {
2339 	struct dma_fence_chain *chain = (struct dma_fence_chain *)arg;
2340 
2341 	if (dma_fence_chain_enable_signaling(&chain->base) == false)
2342 		dma_fence_signal(&chain->base);
2343 	dma_fence_put(&chain->base);
2344 }
2345 
2346 static void
dma_fence_chain_cb(struct dma_fence * f,struct dma_fence_cb * cb)2347 dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
2348 {
2349 	struct dma_fence_chain *chain =
2350 	    container_of(cb, struct dma_fence_chain, cb);
2351 	timeout_set(&chain->to, dma_fence_chain_timo, chain);
2352 	timeout_add(&chain->to, 1);
2353 	dma_fence_put(f);
2354 }
2355 
2356 static bool
dma_fence_chain_enable_signaling(struct dma_fence * fence)2357 dma_fence_chain_enable_signaling(struct dma_fence *fence)
2358 {
2359 	struct dma_fence_chain *chain, *h;
2360 	struct dma_fence *f;
2361 
2362 	h = to_dma_fence_chain(fence);
2363 	dma_fence_get(&h->base);
2364 	dma_fence_chain_for_each(fence, &h->base) {
2365 		chain = to_dma_fence_chain(fence);
2366 		if (chain == NULL)
2367 			f = fence;
2368 		else
2369 			f = chain->fence;
2370 
2371 		dma_fence_get(f);
2372 		if (!dma_fence_add_callback(f, &h->cb, dma_fence_chain_cb)) {
2373 			dma_fence_put(fence);
2374 			return true;
2375 		}
2376 		dma_fence_put(f);
2377 	}
2378 	dma_fence_put(&h->base);
2379 	return false;
2380 }
2381 
2382 static bool
dma_fence_chain_signaled(struct dma_fence * fence)2383 dma_fence_chain_signaled(struct dma_fence *fence)
2384 {
2385 	struct dma_fence_chain *chain;
2386 	struct dma_fence *f;
2387 
2388 	dma_fence_chain_for_each(fence, fence) {
2389 		chain = to_dma_fence_chain(fence);
2390 		if (chain == NULL)
2391 			f = fence;
2392 		else
2393 			f = chain->fence;
2394 
2395 		if (dma_fence_is_signaled(f) == false) {
2396 			dma_fence_put(fence);
2397 			return false;
2398 		}
2399 	}
2400 	return true;
2401 }
2402 
2403 static void
dma_fence_chain_release(struct dma_fence * fence)2404 dma_fence_chain_release(struct dma_fence *fence)
2405 {
2406 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
2407 	struct dma_fence_chain *prev_chain;
2408 	struct dma_fence *prev;
2409 
2410 	for (prev = chain->prev; prev != NULL; prev = chain->prev) {
2411 		if (kref_read(&prev->refcount) > 1)
2412 			break;
2413 		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
2414 			break;
2415 		chain->prev = prev_chain->prev;
2416 		prev_chain->prev = NULL;
2417 		dma_fence_put(prev);
2418 	}
2419 	dma_fence_put(prev);
2420 	dma_fence_put(chain->fence);
2421 	dma_fence_free(fence);
2422 }
2423 
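/*
 * Step to the previous node of a fence chain, opportunistically
 * splicing out predecessors whose fences have already signaled via an
 * atomic compare-and-swap on chain->prev.  Consumes the caller's
 * reference on "fence" and returns a new reference on the previous
 * fence, or NULL at the head of the chain.
 */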
2424 struct dma_fence *
dma_fence_chain_walk(struct dma_fence * fence)2425 dma_fence_chain_walk(struct dma_fence *fence)
2426 {
2427 	struct dma_fence_chain *chain = to_dma_fence_chain(fence), *prev_chain;
2428 	struct dma_fence *prev, *new_prev, *tmp;
2429 
2430 	if (chain == NULL) {
2431 		dma_fence_put(fence);
2432 		return NULL;
2433 	}
2434 
2435 	while ((prev = dma_fence_get(chain->prev)) != NULL) {
2436 		prev_chain = to_dma_fence_chain(prev);
2437 		if (prev_chain != NULL) {
2438 			if (!dma_fence_is_signaled(prev_chain->fence))
2439 				break;
2440 			new_prev = dma_fence_get(prev_chain->prev);
2441 		} else {
2442 			if (!dma_fence_is_signaled(prev))
2443 				break;
2444 			new_prev = NULL;
2445 		}
2446 		tmp = atomic_cas_ptr(&chain->prev, prev, new_prev);
2447 		dma_fence_put(tmp == prev ? prev : new_prev);
2448 		dma_fence_put(prev);
2449 	}
2450 
2451 	dma_fence_put(fence);
2452 	return prev;
2453 }
2454 
2455 const struct dma_fence_ops dma_fence_chain_ops = {
2456 	.get_driver_name = dma_fence_chain_get_driver_name,
2457 	.get_timeline_name = dma_fence_chain_get_timeline_name,
2458 	.enable_signaling = dma_fence_chain_enable_signaling,
2459 	.signaled = dma_fence_chain_signaled,
2460 	.release = dma_fence_chain_release,
2461 	.use_64bit_seqno = true,
2462 };
2463 
2464 bool
dma_fence_is_container(struct dma_fence * fence)2465 dma_fence_is_container(struct dma_fence *fence)
2466 {
2467 	return (fence->ops == &dma_fence_chain_ops) ||
2468 	    (fence->ops == &dma_fence_array_ops);
2469 }
2470 
2471 int
dmabuf_read(struct file * fp,struct uio * uio,int fflags)2472 dmabuf_read(struct file *fp, struct uio *uio, int fflags)
2473 {
2474 	return (ENXIO);
2475 }
2476 
2477 int
dmabuf_write(struct file * fp,struct uio * uio,int fflags)2478 dmabuf_write(struct file *fp, struct uio *uio, int fflags)
2479 {
2480 	return (ENXIO);
2481 }
2482 
2483 int
dmabuf_ioctl(struct file * fp,u_long com,caddr_t data,struct proc * p)2484 dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
2485 {
2486 	return (ENOTTY);
2487 }
2488 
2489 int
dmabuf_kqfilter(struct file * fp,struct knote * kn)2490 dmabuf_kqfilter(struct file *fp, struct knote *kn)
2491 {
2492 	return (EINVAL);
2493 }
2494 
2495 int
dmabuf_stat(struct file * fp,struct stat * st,struct proc * p)2496 dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
2497 {
2498 	struct dma_buf *dmabuf = fp->f_data;
2499 
2500 	memset(st, 0, sizeof(*st));
2501 	st->st_size = dmabuf->size;
2502 	st->st_mode = S_IFIFO;	/* XXX */
2503 	return (0);
2504 }
2505 
2506 int
dmabuf_close(struct file * fp,struct proc * p)2507 dmabuf_close(struct file *fp, struct proc *p)
2508 {
2509 	struct dma_buf *dmabuf = fp->f_data;
2510 
2511 	fp->f_data = NULL;
2512 	KERNEL_LOCK();
2513 	dmabuf->ops->release(dmabuf);
2514 	KERNEL_UNLOCK();
2515 	free(dmabuf, M_DRM, sizeof(struct dma_buf));
2516 	return (0);
2517 }
2518 
2519 int
dmabuf_seek(struct file * fp,off_t * offset,int whence,struct proc * p)2520 dmabuf_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
2521 {
2522 	struct dma_buf *dmabuf = fp->f_data;
2523 	off_t newoff;
2524 
2525 	if (*offset != 0)
2526 		return (EINVAL);
2527 
2528 	switch (whence) {
2529 	case SEEK_SET:
2530 		newoff = 0;
2531 		break;
2532 	case SEEK_END:
2533 		newoff = dmabuf->size;
2534 		break;
2535 	default:
2536 		return (EINVAL);
2537 	}
2538 	mtx_enter(&fp->f_mtx);
2539 	fp->f_offset = newoff;
2540 	mtx_leave(&fp->f_mtx);
2541 	*offset = newoff;
2542 	return (0);
2543 }
2544 
2545 const struct fileops dmabufops = {
2546 	.fo_read	= dmabuf_read,
2547 	.fo_write	= dmabuf_write,
2548 	.fo_ioctl	= dmabuf_ioctl,
2549 	.fo_kqfilter	= dmabuf_kqfilter,
2550 	.fo_stat	= dmabuf_stat,
2551 	.fo_close	= dmabuf_close,
2552 	.fo_seek	= dmabuf_seek,
2553 };
2554 
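/*
 * Wrap an exported buffer in an anonymous file backed by the
 * dmabufops above, so it can be handed around as a file descriptor
 * just like on Linux.  Hypothetical exporter sketch (names are
 * placeholders, not from this file):
 *
 *	struct dma_buf_export_info info = {
 *		.ops = &my_dmabuf_ops, .size = obj_size, .priv = obj,
 *	};
 *	struct dma_buf *buf = dma_buf_export(&info);
 *	int fd = dma_buf_fd(buf, O_CLOEXEC);
 */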
2555 struct dma_buf *
dma_buf_export(const struct dma_buf_export_info * info)2556 dma_buf_export(const struct dma_buf_export_info *info)
2557 {
2558 	struct proc *p = curproc;
2559 	struct dma_buf *dmabuf;
2560 	struct file *fp;
2561 
2562 	fp = fnew(p);
2563 	if (fp == NULL)
2564 		return ERR_PTR(-ENFILE);
2565 	fp->f_type = DTYPE_DMABUF;
2566 	fp->f_ops = &dmabufops;
2567 	dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
2568 	dmabuf->priv = info->priv;
2569 	dmabuf->ops = info->ops;
2570 	dmabuf->size = info->size;
2571 	dmabuf->file = fp;
2572 	fp->f_data = dmabuf;
2573 	INIT_LIST_HEAD(&dmabuf->attachments);
2574 	return dmabuf;
2575 }
2576 
2577 struct dma_buf *
dma_buf_get(int fd)2578 dma_buf_get(int fd)
2579 {
2580 	struct proc *p = curproc;
2581 	struct filedesc *fdp = p->p_fd;
2582 	struct file *fp;
2583 
2584 	if ((fp = fd_getfile(fdp, fd)) == NULL)
2585 		return ERR_PTR(-EBADF);
2586 
2587 	if (fp->f_type != DTYPE_DMABUF) {
2588 		FRELE(fp, p);
2589 		return ERR_PTR(-EINVAL);
2590 	}
2591 
2592 	return fp->f_data;
2593 }
2594 
2595 void
dma_buf_put(struct dma_buf * dmabuf)2596 dma_buf_put(struct dma_buf *dmabuf)
2597 {
2598 	KASSERT(dmabuf);
2599 	KASSERT(dmabuf->file);
2600 
2601 	FRELE(dmabuf->file, curproc);
2602 }
2603 
2604 int
dma_buf_fd(struct dma_buf * dmabuf,int flags)2605 dma_buf_fd(struct dma_buf *dmabuf, int flags)
2606 {
2607 	struct proc *p = curproc;
2608 	struct filedesc *fdp = p->p_fd;
2609 	struct file *fp = dmabuf->file;
2610 	int fd, cloexec, error;
2611 
2612 	cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
2613 
2614 	fdplock(fdp);
2615 restart:
2616 	if ((error = fdalloc(p, 0, &fd)) != 0) {
2617 		if (error == ENOSPC) {
2618 			fdexpand(p);
2619 			goto restart;
2620 		}
2621 		fdpunlock(fdp);
2622 		return -error;
2623 	}
2624 
2625 	fdinsert(fdp, fd, cloexec, fp);
2626 	fdpunlock(fdp);
2627 
2628 	return fd;
2629 }
2630 
2631 void
get_dma_buf(struct dma_buf * dmabuf)2632 get_dma_buf(struct dma_buf *dmabuf)
2633 {
2634 	FREF(dmabuf->file);
2635 }
2636 
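/*
 * Derive the PCIe link speed cap from the Link Capabilities
 * registers: LCAP2 (present on PCIe r3.0 and later) is preferred,
 * falling back to the low bits of LCAP on older parts.  Returns one
 * of the PCIE_SPEED_* values or PCI_SPEED_UNKNOWN.
 */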
2637 enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev * pdev)2638 pcie_get_speed_cap(struct pci_dev *pdev)
2639 {
2640 	pci_chipset_tag_t	pc;
2641 	pcitag_t		tag;
2642 	int			pos;
2643 	pcireg_t		xcap, lnkcap = 0, lnkcap2 = 0;
2644 	pcireg_t		id;
2645 	enum pci_bus_speed	cap = PCI_SPEED_UNKNOWN;
2646 	int			bus, device, function;
2647 
2648 	if (pdev == NULL)
2649 		return PCI_SPEED_UNKNOWN;
2650 
2651 	pc = pdev->pc;
2652 	tag = pdev->tag;
2653 
2654 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2655 	    &pos, NULL))
2656 		return PCI_SPEED_UNKNOWN;
2657 
2658 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2659 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2660 
2661 	/* we've been informed VIA and ServerWorks don't make the cut */
2662 	if (PCI_VENDOR(id) == PCI_VENDOR_VIATECH ||
2663 	    PCI_VENDOR(id) == PCI_VENDOR_RCC)
2664 		return PCI_SPEED_UNKNOWN;
2665 
2666 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2667 	xcap = pci_conf_read(pc, tag, pos + PCI_PCIE_XCAP);
2668 	if (PCI_PCIE_XCAP_VER(xcap) >= 2)
2669 		lnkcap2 = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP2);
2670 
2671 	lnkcap &= 0x0f;
2672 	lnkcap2 &= 0xfe;
2673 
2674 	if (lnkcap2) { /* Link Capabilities 2, PCIe r3.0 and later */
2675 		if (lnkcap2 & 0x02)
2676 			cap = PCIE_SPEED_2_5GT;
2677 		if (lnkcap2 & 0x04)
2678 			cap = PCIE_SPEED_5_0GT;
2679 		if (lnkcap2 & 0x08)
2680 			cap = PCIE_SPEED_8_0GT;
2681 		if (lnkcap2 & 0x10)
2682 			cap = PCIE_SPEED_16_0GT;
2683 		if (lnkcap2 & 0x20)
2684 			cap = PCIE_SPEED_32_0GT;
2685 		if (lnkcap2 & 0x40)
2686 			cap = PCIE_SPEED_64_0GT;
2687 	} else {
2688 		if (lnkcap & 0x01)
2689 			cap = PCIE_SPEED_2_5GT;
2690 		if (lnkcap & 0x02)
2691 			cap = PCIE_SPEED_5_0GT;
2692 	}
2693 
2694 	DRM_INFO("probing pcie caps for device %d:%d:%d 0x%04x:0x%04x = %x/%x\n",
2695 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap,
2696 	    lnkcap2);
2697 	return cap;
2698 }
2699 
2700 enum pcie_link_width
pcie_get_width_cap(struct pci_dev * pdev)2701 pcie_get_width_cap(struct pci_dev *pdev)
2702 {
2703 	pci_chipset_tag_t	pc = pdev->pc;
2704 	pcitag_t		tag = pdev->tag;
2705 	int			pos;
2706 	pcireg_t		lnkcap = 0;
2707 	pcireg_t		id;
2708 	int			bus, device, function;
2709 
2710 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2711 	    &pos, NULL))
2712 		return PCIE_LNK_WIDTH_UNKNOWN;
2713 
2714 	id = pci_conf_read(pc, tag, PCI_ID_REG);
2715 	pci_decompose_tag(pc, tag, &bus, &device, &function);
2716 
2717 	lnkcap = pci_conf_read(pc, tag, pos + PCI_PCIE_LCAP);
2718 
2719 	DRM_INFO("probing pcie width for device %d:%d:%d 0x%04x:0x%04x = %x\n",
2720 	    bus, device, function, PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap);
2721 
2722 	if (lnkcap)
2723 		return (lnkcap & 0x3f0) >> 4;
2724 	return PCIE_LNK_WIDTH_UNKNOWN;
2725 }
2726 
2727 bool
pcie_aspm_enabled(struct pci_dev * pdev)2728 pcie_aspm_enabled(struct pci_dev *pdev)
2729 {
2730 	pci_chipset_tag_t	pc = pdev->pc;
2731 	pcitag_t		tag = pdev->tag;
2732 	int			pos;
2733 	pcireg_t		lcsr;
2734 
2735 	if (!pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS,
2736 	    &pos, NULL))
2737 		return false;
2738 
2739 	lcsr = pci_conf_read(pc, tag, pos + PCI_PCIE_LCSR);
2740 	if ((lcsr & (PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1)) != 0)
2741 		return true;
2742 
2743 	return false;
2744 }
2745 
2746 static wait_queue_head_t bit_waitq;
2747 wait_queue_head_t var_waitq;
2748 struct mutex wait_bit_mtx = MUTEX_INITIALIZER(IPL_TTY);
2749 
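/*
 * Minimal wait_on_bit()/wake_up_bit() emulation: all waiters share
 * wait_bit_mtx and sleep on the address of "word", so a wake-up is
 * per-word rather than per-bit.  The wait routines return 0 once the
 * bit is clear and 1 if the sleep was interrupted.
 */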
2750 int
wait_on_bit(unsigned long * word,int bit,unsigned mode)2751 wait_on_bit(unsigned long *word, int bit, unsigned mode)
2752 {
2753 	int err;
2754 
2755 	if (!test_bit(bit, word))
2756 		return 0;
2757 
2758 	mtx_enter(&wait_bit_mtx);
2759 	while (test_bit(bit, word)) {
2760 		err = msleep_nsec(word, &wait_bit_mtx, PWAIT | mode, "wtb",
2761 		    INFSLP);
2762 		if (err) {
2763 			mtx_leave(&wait_bit_mtx);
2764 			return 1;
2765 		}
2766 	}
2767 	mtx_leave(&wait_bit_mtx);
2768 	return 0;
2769 }
2770 
2771 int
wait_on_bit_timeout(unsigned long * word,int bit,unsigned mode,int timo)2772 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, int timo)
2773 {
2774 	int err;
2775 
2776 	if (!test_bit(bit, word))
2777 		return 0;
2778 
2779 	mtx_enter(&wait_bit_mtx);
2780 	while (test_bit(bit, word)) {
2781 		err = msleep(word, &wait_bit_mtx, PWAIT | mode, "wtb", timo);
2782 		if (err) {
2783 			mtx_leave(&wait_bit_mtx);
2784 			return 1;
2785 		}
2786 	}
2787 	mtx_leave(&wait_bit_mtx);
2788 	return 0;
2789 }
2790 
2791 void
wake_up_bit(void * word,int bit)2792 wake_up_bit(void *word, int bit)
2793 {
2794 	mtx_enter(&wait_bit_mtx);
2795 	wakeup(word);
2796 	mtx_leave(&wait_bit_mtx);
2797 }
2798 
2799 void
clear_and_wake_up_bit(int bit,void * word)2800 clear_and_wake_up_bit(int bit, void *word)
2801 {
2802 	clear_bit(bit, word);
2803 	wake_up_bit(word, bit);
2804 }
2805 
2806 wait_queue_head_t *
bit_waitqueue(void * word,int bit)2807 bit_waitqueue(void *word, int bit)
2808 {
2809 	/* XXX hash table of wait queues? */
2810 	return &bit_waitq;
2811 }
2812 
2813 wait_queue_head_t *
__var_waitqueue(void * p)2814 __var_waitqueue(void *p)
2815 {
2816 	/* XXX hash table of wait queues? */
2817 	return &bit_waitq;
2818 }
2819 
2820 struct workqueue_struct *system_wq;
2821 struct workqueue_struct *system_highpri_wq;
2822 struct workqueue_struct *system_unbound_wq;
2823 struct workqueue_struct *system_long_wq;
2824 struct taskq *taskletq;
2825 
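/*
 * Bring up the global state the Linux compatibility layer depends
 * on: the system_*_wq workqueues (backed by taskq(9)), the tasklet
 * taskq, the shared bit/var wait queues, the idr_entry pool and the
 * kmap_atomic scratch mapping.  drm_linux_exit() below releases the
 * same resources.
 */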
2826 void
drm_linux_init(void)2827 drm_linux_init(void)
2828 {
2829 	system_wq = (struct workqueue_struct *)
2830 	    taskq_create("drmwq", 4, IPL_HIGH, 0);
2831 	system_highpri_wq = (struct workqueue_struct *)
2832 	    taskq_create("drmhpwq", 4, IPL_HIGH, 0);
2833 	system_unbound_wq = (struct workqueue_struct *)
2834 	    taskq_create("drmubwq", 4, IPL_HIGH, 0);
2835 	system_long_wq = (struct workqueue_struct *)
2836 	    taskq_create("drmlwq", 4, IPL_HIGH, 0);
2837 
2838 	taskletq = taskq_create("drmtskl", 1, IPL_HIGH, 0);
2839 
2840 	init_waitqueue_head(&bit_waitq);
2841 	init_waitqueue_head(&var_waitq);
2842 
2843 	pool_init(&idr_pool, sizeof(struct idr_entry), 0, IPL_TTY, 0,
2844 	    "idrpl", NULL);
2845 
2846 	kmap_atomic_va =
2847 	    (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
2848 }
2849 
2850 void
drm_linux_exit(void)2851 drm_linux_exit(void)
2852 {
2853 	pool_destroy(&idr_pool);
2854 
2855 	taskq_destroy(taskletq);
2856 
2857 	taskq_destroy((struct taskq *)system_long_wq);
2858 	taskq_destroy((struct taskq *)system_unbound_wq);
2859 	taskq_destroy((struct taskq *)system_highpri_wq);
2860 	taskq_destroy((struct taskq *)system_wq);
2861 }
2862 
2863 #define PCIE_ECAP_RESIZE_BAR	0x15
2864 #define RBCAP0			0x04
2865 #define RBCTRL0			0x08
2866 #define RBCTRL_BARINDEX_MASK	0x07
2867 #define RBCTRL_BARSIZE_MASK	0x1f00
2868 #define RBCTRL_BARSIZE_SHIFT	8
2869 
2870 /* resize a PCI BAR; the requested BAR size in MB is 1 << nsize */
2871 int
pci_resize_resource(struct pci_dev * pdev,int bar,int nsize)2872 pci_resize_resource(struct pci_dev *pdev, int bar, int nsize)
2873 {
2874 	pcireg_t	reg;
2875 	uint32_t	offset, capid;
2876 
2877 	KASSERT(bar == 0);
2878 
2879 	offset = PCI_PCIE_ECAP;
2880 
2881 	/* search PCI Express Extended Capabilities */
2882 	do {
2883 		reg = pci_conf_read(pdev->pc, pdev->tag, offset);
2884 		capid = PCI_PCIE_ECAP_ID(reg);
2885 		if (capid == PCIE_ECAP_RESIZE_BAR)
2886 			break;
2887 		offset = PCI_PCIE_ECAP_NEXT(reg);
2888 	} while (capid != 0);
2889 
2890 	if (capid == 0) {
2891 		printf("%s: could not find resize bar cap!\n", __func__);
2892 		return -ENOTSUP;
2893 	}
2894 
2895 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCAP0);
2896 
2897 	if ((reg & (1 << (nsize + 4))) == 0) {
2898 		printf("%s: size not supported\n", __func__);
2899 		return -ENOTSUP;
2900 	}
2901 
2902 	reg = pci_conf_read(pdev->pc, pdev->tag, offset + RBCTRL0);
2903 	if ((reg & RBCTRL_BARINDEX_MASK) != 0) {
2904 		printf("%s: BAR index not 0\n", __func__);
2905 		return -EINVAL;
2906 	}
2907 
2908 	reg &= ~RBCTRL_BARSIZE_MASK;
2909 	reg |= (nsize << RBCTRL_BARSIZE_SHIFT) & RBCTRL_BARSIZE_MASK;
2910 
2911 	pci_conf_write(pdev->pc, pdev->tag, offset + RBCTRL0, reg);
2912 
2913 	return 0;
2914 }
2915 
2916 TAILQ_HEAD(, shrinker) shrinkers = TAILQ_HEAD_INITIALIZER(shrinkers);
2917 
2918 int
register_shrinker(struct shrinker * shrinker,const char * format,...)2919 register_shrinker(struct shrinker *shrinker, const char *format, ...)
2920 {
2921 	TAILQ_INSERT_TAIL(&shrinkers, shrinker, next);
2922 	return 0;
2923 }
2924 
2925 void
unregister_shrinker(struct shrinker * shrinker)2926 unregister_shrinker(struct shrinker *shrinker)
2927 {
2928 	TAILQ_REMOVE(&shrinkers, shrinker, next);
2929 }
2930 
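/*
 * Memory pressure hook: walk the registered shrinkers and ask each
 * one to scan objects until roughly "npages" pages have been given
 * back or the shrinker list is exhausted.
 */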
2931 void
drmbackoff(long npages)2932 drmbackoff(long npages)
2933 {
2934 	struct shrink_control sc;
2935 	struct shrinker *shrinker;
2936 	u_long ret;
2937 
2938 	shrinker = TAILQ_FIRST(&shrinkers);
2939 	while (shrinker && npages > 0) {
2940 		sc.nr_to_scan = npages;
2941 		ret = shrinker->scan_objects(shrinker, &sc);
2942 		npages -= ret;
2943 		shrinker = TAILQ_NEXT(shrinker, next);
2944 	}
2945 }
2946 
2947 void *
bitmap_zalloc(u_int n,gfp_t flags)2948 bitmap_zalloc(u_int n, gfp_t flags)
2949 {
2950 	return kcalloc(BITS_TO_LONGS(n), sizeof(long), flags);
2951 }
2952 
2953 void
bitmap_free(void * p)2954 bitmap_free(void *p)
2955 {
2956 	kfree(p);
2957 }
2958 
2959 int
atomic_dec_and_mutex_lock(volatile int * v,struct rwlock * lock)2960 atomic_dec_and_mutex_lock(volatile int *v, struct rwlock *lock)
2961 {
2962 	if (atomic_add_unless(v, -1, 1))
2963 		return 0;
2964 
2965 	rw_enter_write(lock);
2966 	if (atomic_dec_return(v) == 0)
2967 		return 1;
2968 	rw_exit_write(lock);
2969 	return 0;
2970 }
2971 
2972 int
printk(const char * fmt,...)2973 printk(const char *fmt, ...)
2974 {
2975 	int ret, level;
2976 	va_list ap;
2977 
2978 	if (fmt != NULL && *fmt == '\001') {
2979 		level = fmt[1];
2980 #ifndef DRMDEBUG
2981 		if (level >= KERN_INFO[1] && level <= '9')
2982 			return 0;
2983 #endif
2984 		fmt += 2;
2985 	}
2986 
2987 	va_start(ap, fmt);
2988 	ret = vprintf(fmt, ap);
2989 	va_end(ap);
2990 
2991 	return ret;
2992 }
2993 
2994 #define START(node) ((node)->start)
2995 #define LAST(node) ((node)->last)
2996 
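/*
 * Simplified interval tree on top of the cached red-black tree: nodes
 * are keyed on their start address only and overlap lookups fall back
 * to a linear scan, which is adequate for the small trees DRM keeps.
 * Illustrative sketch (hypothetical locals, not from this file):
 *
 *	struct interval_tree_node n = { .start = va, .last = va + len - 1 };
 *	if (interval_tree_iter_first(&root, n.start, n.last) == NULL)
 *		interval_tree_insert(&n, &root);
 */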
2997 struct interval_tree_node *
interval_tree_iter_first(struct rb_root_cached * root,unsigned long start,unsigned long last)2998 interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
2999     unsigned long last)
3000 {
3001 	struct interval_tree_node *node;
3002 	struct rb_node *rb;
3003 
3004 	for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) {
3005 		node = rb_entry(rb, typeof(*node), rb);
3006 		if (LAST(node) >= start && START(node) <= last)
3007 			return node;
3008 	}
3009 	return NULL;
3010 }
3011 
3012 void
interval_tree_remove(struct interval_tree_node * node,struct rb_root_cached * root)3013 interval_tree_remove(struct interval_tree_node *node,
3014     struct rb_root_cached *root)
3015 {
3016 	rb_erase_cached(&node->rb, root);
3017 }
3018 
3019 void
interval_tree_insert(struct interval_tree_node * node,struct rb_root_cached * root)3020 interval_tree_insert(struct interval_tree_node *node,
3021     struct rb_root_cached *root)
3022 {
3023 	struct rb_node **iter = &root->rb_root.rb_node;
3024 	struct rb_node *parent = NULL;
3025 	struct interval_tree_node *iter_node;
3026 
3027 	while (*iter) {
3028 		parent = *iter;
3029 		iter_node = rb_entry(*iter, struct interval_tree_node, rb);
3030 
3031 		if (node->start < iter_node->start)
3032 			iter = &(*iter)->rb_left;
3033 		else
3034 			iter = &(*iter)->rb_right;
3035 	}
3036 
3037 	rb_link_node(&node->rb, parent, iter);
3038 	rb_insert_color_cached(&node->rb, root, false);
3039 }
3040 
3041 int
syncfile_read(struct file * fp,struct uio * uio,int fflags)3042 syncfile_read(struct file *fp, struct uio *uio, int fflags)
3043 {
3044 	return ENXIO;
3045 }
3046 
3047 int
syncfile_write(struct file * fp,struct uio * uio,int fflags)3048 syncfile_write(struct file *fp, struct uio *uio, int fflags)
3049 {
3050 	return ENXIO;
3051 }
3052 
3053 int
syncfile_ioctl(struct file * fp,u_long com,caddr_t data,struct proc * p)3054 syncfile_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
3055 {
3056 	return ENOTTY;
3057 }
3058 
3059 int
syncfile_kqfilter(struct file * fp,struct knote * kn)3060 syncfile_kqfilter(struct file *fp, struct knote *kn)
3061 {
3062 	return EINVAL;
3063 }
3064 
3065 int
syncfile_stat(struct file * fp,struct stat * st,struct proc * p)3066 syncfile_stat(struct file *fp, struct stat *st, struct proc *p)
3067 {
3068 	memset(st, 0, sizeof(*st));
3069 	st->st_mode = S_IFIFO;	/* XXX */
3070 	return 0;
3071 }
3072 
3073 int
syncfile_close(struct file * fp,struct proc * p)3074 syncfile_close(struct file *fp, struct proc *p)
3075 {
3076 	struct sync_file *sf = fp->f_data;
3077 
3078 	dma_fence_put(sf->fence);
3079 	fp->f_data = NULL;
3080 	free(sf, M_DRM, sizeof(struct sync_file));
3081 	return 0;
3082 }
3083 
3084 int
syncfile_seek(struct file * fp,off_t * offset,int whence,struct proc * p)3085 syncfile_seek(struct file *fp, off_t *offset, int whence, struct proc *p)
3086 {
3087 	off_t newoff;
3088 
3089 	if (*offset != 0)
3090 		return EINVAL;
3091 
3092 	switch (whence) {
3093 	case SEEK_SET:
3094 		newoff = 0;
3095 		break;
3096 	case SEEK_END:
3097 		newoff = 0;
3098 		break;
3099 	default:
3100 		return EINVAL;
3101 	}
3102 	mtx_enter(&fp->f_mtx);
3103 	fp->f_offset = newoff;
3104 	mtx_leave(&fp->f_mtx);
3105 	*offset = newoff;
3106 	return 0;
3107 }
3108 
3109 const struct fileops syncfileops = {
3110 	.fo_read	= syncfile_read,
3111 	.fo_write	= syncfile_write,
3112 	.fo_ioctl	= syncfile_ioctl,
3113 	.fo_kqfilter	= syncfile_kqfilter,
3114 	.fo_stat	= syncfile_stat,
3115 	.fo_close	= syncfile_close,
3116 	.fo_seek	= syncfile_seek,
3117 };
3118 
3119 void
fd_install(int fd,struct file * fp)3120 fd_install(int fd, struct file *fp)
3121 {
3122 	struct proc *p = curproc;
3123 	struct filedesc *fdp = p->p_fd;
3124 
3125 	if (fp->f_type != DTYPE_SYNC)
3126 		return;
3127 
3128 	fdplock(fdp);
3129 	/* all callers use get_unused_fd_flags(O_CLOEXEC) */
3130 	fdinsert(fdp, fd, UF_EXCLOSE, fp);
3131 	fdpunlock(fdp);
3132 }
3133 
3134 void
fput(struct file * fp)3135 fput(struct file *fp)
3136 {
3137 	if (fp->f_type != DTYPE_SYNC)
3138 		return;
3139 
3140 	FRELE(fp, curproc);
3141 }
3142 
3143 int
get_unused_fd_flags(unsigned int flags)3144 get_unused_fd_flags(unsigned int flags)
3145 {
3146 	struct proc *p = curproc;
3147 	struct filedesc *fdp = p->p_fd;
3148 	int error, fd;
3149 
3150 	KASSERT((flags & O_CLOEXEC) != 0);
3151 
3152 	fdplock(fdp);
3153 retryalloc:
3154 	if ((error = fdalloc(p, 0, &fd)) != 0) {
3155 		if (error == ENOSPC) {
3156 			fdexpand(p);
3157 			goto retryalloc;
3158 		}
3159 		fdpunlock(fdp);
3160 		return -1;
3161 	}
3162 	fdpunlock(fdp);
3163 
3164 	return fd;
3165 }
3166 
3167 void
put_unused_fd(int fd)3168 put_unused_fd(int fd)
3169 {
3170 	struct filedesc *fdp = curproc->p_fd;
3171 
3172 	fdplock(fdp);
3173 	fdremove(fdp, fd);
3174 	fdpunlock(fdp);
3175 }
3176 
3177 struct dma_fence *
sync_file_get_fence(int fd)3178 sync_file_get_fence(int fd)
3179 {
3180 	struct proc *p = curproc;
3181 	struct filedesc *fdp = p->p_fd;
3182 	struct file *fp;
3183 	struct sync_file *sf;
3184 	struct dma_fence *f;
3185 
3186 	if ((fp = fd_getfile(fdp, fd)) == NULL)
3187 		return NULL;
3188 
3189 	if (fp->f_type != DTYPE_SYNC) {
3190 		FRELE(fp, p);
3191 		return NULL;
3192 	}
3193 	sf = fp->f_data;
3194 	f = dma_fence_get(sf->fence);
3195 	FRELE(sf->file, p);
3196 	return f;
3197 }
3198 
3199 struct sync_file *
sync_file_create(struct dma_fence * fence)3200 sync_file_create(struct dma_fence *fence)
3201 {
3202 	struct proc *p = curproc;
3203 	struct sync_file *sf;
3204 	struct file *fp;
3205 
3206 	fp = fnew(p);
3207 	if (fp == NULL)
3208 		return NULL;
3209 	fp->f_type = DTYPE_SYNC;
3210 	fp->f_ops = &syncfileops;
3211 	sf = malloc(sizeof(struct sync_file), M_DRM, M_WAITOK | M_ZERO);
3212 	sf->file = fp;
3213 	sf->fence = dma_fence_get(fence);
3214 	fp->f_data = sf;
3215 	return sf;
3216 }
3217 
3218 bool
drm_firmware_drivers_only(void)3219 drm_firmware_drivers_only(void)
3220 {
3221 	return false;
3222 }
3223 
3224 
3225 void *
memremap(phys_addr_t phys_addr,size_t size,int flags)3226 memremap(phys_addr_t phys_addr, size_t size, int flags)
3227 {
3228 	STUB();
3229 	return NULL;
3230 }
3231 
3232 void
memunmap(void * addr)3233 memunmap(void *addr)
3234 {
3235 	STUB();
3236 }
3237 
3238 #include <linux/platform_device.h>
3239 
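/*
 * Map a Linux "struct device" back to the bus_dma tag it should use:
 * either the DRM softc it belongs to or, failing that, the embedding
 * platform_device.  The dma_alloc_coherent()/dma_free_coherent()
 * shims below rely on this to route allocations through drm_dmamem.
 */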
3240 bus_dma_tag_t
dma_tag_lookup(struct device * dev)3241 dma_tag_lookup(struct device *dev)
3242 {
3243 	extern struct cfdriver drm_cd;
3244 	struct drm_device *drm;
3245 	int i;
3246 
3247 	for (i = 0; i < drm_cd.cd_ndevs; i++) {
3248 		drm = drm_cd.cd_devs[i];
3249 		if (drm && drm->dev == dev)
3250 			return drm->dmat;
3251 	}
3252 
3253 	return ((struct platform_device *)dev)->dmat;
3254 }
3255 
3256 LIST_HEAD(, drm_dmamem) dmamem_list = LIST_HEAD_INITIALIZER(dmamem_list);
3257 
3258 void *
dma_alloc_coherent(struct device * dev,size_t size,dma_addr_t * dma_handle,int gfp)3259 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
3260     int gfp)
3261 {
3262 	bus_dma_tag_t dmat = dma_tag_lookup(dev);
3263 	struct drm_dmamem *mem;
3264 
3265 	mem = drm_dmamem_alloc(dmat, size, PAGE_SIZE, 1, size,
3266 	    BUS_DMA_COHERENT, 0);
3267 	if (mem == NULL)
3268 		return NULL;
3269 	*dma_handle = mem->map->dm_segs[0].ds_addr;
3270 	LIST_INSERT_HEAD(&dmamem_list, mem, next);
3271 	return mem->kva;
3272 }
3273 
3274 void
dma_free_coherent(struct device * dev,size_t size,void * cpu_addr,dma_addr_t dma_handle)3275 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
3276     dma_addr_t dma_handle)
3277 {
3278 	bus_dma_tag_t dmat = dma_tag_lookup(dev);
3279 	struct drm_dmamem *mem;
3280 
3281 	LIST_FOREACH(mem, &dmamem_list, next) {
3282 		if (mem->kva == cpu_addr)
3283 			break;
3284 	}
3285 	KASSERT(mem);
3286 	KASSERT(mem->size == size);
3287 	KASSERT(mem->map->dm_segs[0].ds_addr == dma_handle);
3288 
3289 	LIST_REMOVE(mem, next);
3290 	drm_dmamem_free(dmat, mem);
3291 }
3292 
3293 int
dma_get_sgtable(struct device * dev,struct sg_table * sgt,void * cpu_addr,dma_addr_t dma_addr,size_t size)3294 dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
3295     dma_addr_t dma_addr, size_t size)
3296 {
3297 	paddr_t pa;
3298 	int ret;
3299 
3300 	if (!pmap_extract(pmap_kernel(), (vaddr_t)cpu_addr, &pa))
3301 		return -EINVAL;
3302 
3303 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
3304 	if (ret)
3305 		return ret;
3306 
3307 	sg_set_page(sgt->sgl, PHYS_TO_VM_PAGE(pa), size, 0);
3308 	return 0;
3309 }
3310 
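/*
 * Map a raw physical address range (typically another device's BAR,
 * e.g. for peer-to-peer DMA) by loading it into a freshly created
 * bus_dmamap.  Returns DMA_MAPPING_ERROR on failure; note that the
 * map itself is not handed back to the caller, mirroring the Linux
 * API which only returns a dma_addr_t.
 */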
3311 dma_addr_t
dma_map_resource(struct device * dev,phys_addr_t phys_addr,size_t size,enum dma_data_direction dir,u_long attr)3312 dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
3313     enum dma_data_direction dir, u_long attr)
3314 {
3315 	bus_dma_tag_t dmat = dma_tag_lookup(dev);
3316 	bus_dmamap_t map;
3317 	bus_dma_segment_t seg;
3318 
3319 	if (bus_dmamap_create(dmat, size, 1, size, 0,
3320 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &map))
3321 		return DMA_MAPPING_ERROR;
3322 	seg.ds_addr = phys_addr;
3323 	seg.ds_len = size;
3324 	if (bus_dmamap_load_raw(dmat, map, &seg, 1, size, BUS_DMA_WAITOK)) {
3325 		bus_dmamap_destroy(dmat, map);
3326 		return DMA_MAPPING_ERROR;
3327 	}
3328 
3329 	return map->dm_segs[0].ds_addr;
3330 }
3331 
3332 #ifdef BUS_DMA_FIXED
3333 
3334 #include <linux/iommu.h>
3335 
3336 size_t
iommu_map_sgtable(struct iommu_domain * domain,u_long iova,struct sg_table * sgt,int prot)3337 iommu_map_sgtable(struct iommu_domain *domain, u_long iova,
3338     struct sg_table *sgt, int prot)
3339 {
3340 	bus_dma_segment_t seg;
3341 	int error;
3342 
3343 	error = bus_dmamap_create(domain->dmat, sgt->sgl->length, 1,
3344 	    sgt->sgl->length, 0, BUS_DMA_WAITOK, &sgt->dmamap);
3345 	if (error)
3346 		return -ENOMEM;
3347 
3348 	sgt->dmamap->dm_segs[0].ds_addr = iova;
3349 	sgt->dmamap->dm_segs[0].ds_len = sgt->sgl->length;
3350 	sgt->dmamap->dm_nsegs = 1;
3351 	seg.ds_addr = VM_PAGE_TO_PHYS(sgt->sgl->__page);
3352 	seg.ds_len = sgt->sgl->length;
3353 	error = bus_dmamap_load_raw(domain->dmat, sgt->dmamap, &seg, 1,
3354 	    sgt->sgl->length, BUS_DMA_WAITOK | BUS_DMA_FIXED);
3355 	if (error)
3356 		return -ENOMEM;
3357 
3358 	return sg_dma_len(sgt->sgl);
3359 }
3360 
3361 size_t
iommu_unmap(struct iommu_domain * domain,u_long iova,size_t size)3362 iommu_unmap(struct iommu_domain *domain, u_long iova, size_t size)
3363 {
3364 	STUB();
3365 	return 0;
3366 }
3367 
3368 struct iommu_domain *
iommu_get_domain_for_dev(struct device * dev)3369 iommu_get_domain_for_dev(struct device *dev)
3370 {
3371 	STUB();
3372 	return NULL;
3373 }
3374 
3375 phys_addr_t
iommu_iova_to_phys(struct iommu_domain * domain,dma_addr_t iova)3376 iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
3377 {
3378 	STUB();
3379 	return 0;
3380 }
3381 
3382 struct iommu_domain *
iommu_domain_alloc(struct bus_type * type)3383 iommu_domain_alloc(struct bus_type *type)
3384 {
3385 	return malloc(sizeof(struct iommu_domain), M_DEVBUF, M_WAITOK | M_ZERO);
3386 }
3387 
3388 int
iommu_attach_device(struct iommu_domain * domain,struct device * dev)3389 iommu_attach_device(struct iommu_domain *domain, struct device *dev)
3390 {
3391 	struct platform_device *pdev = (struct platform_device *)dev;
3392 
3393 	domain->dmat = pdev->dmat;
3394 	return 0;
3395 }
3396 
3397 #endif
3398 
3399 #include <linux/component.h>
3400 
3401 struct component {
3402 	struct device *dev;
3403 	struct device *adev;
3404 	const struct component_ops *ops;
3405 	SLIST_ENTRY(component) next;
3406 };
3407 
3408 SLIST_HEAD(,component) component_list = SLIST_HEAD_INITIALIZER(component_list);
3409 
3410 int
component_add(struct device * dev,const struct component_ops * ops)3411 component_add(struct device *dev, const struct component_ops *ops)
3412 {
3413 	struct component *component;
3414 
3415 	component = malloc(sizeof(*component), M_DEVBUF, M_WAITOK | M_ZERO);
3416 	component->dev = dev;
3417 	component->ops = ops;
3418 	SLIST_INSERT_HEAD(&component_list, component, next);
3419 	return 0;
3420 }
3421 
3422 int
component_add_typed(struct device * dev,const struct component_ops * ops,int type)3423 component_add_typed(struct device *dev, const struct component_ops *ops,
3424 	int type)
3425 {
3426 	return component_add(dev, ops);
3427 }
3428 
3429 int
component_bind_all(struct device * dev,void * data)3430 component_bind_all(struct device *dev, void *data)
3431 {
3432 	struct component *component;
3433 	int ret = 0;
3434 
3435 	SLIST_FOREACH(component, &component_list, next) {
3436 		if (component->adev == dev) {
3437 			ret = component->ops->bind(component->dev, NULL, data);
3438 			if (ret)
3439 				break;
3440 		}
3441 	}
3442 
3443 	return ret;
3444 }
3445 
3446 struct component_match_entry {
3447 	int (*compare)(struct device *, void *);
3448 	void *data;
3449 };
3450 
3451 struct component_match {
3452 	struct component_match_entry match[4];
3453 	int nmatches;
3454 };
3455 
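/*
 * Very small subset of the Linux component framework: components that
 * match the master's match table are bound to it immediately from
 * here instead of being deferred until every expected component has
 * registered.
 */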
3456 int
component_master_add_with_match(struct device * dev,const struct component_master_ops * ops,struct component_match * match)3457 component_master_add_with_match(struct device *dev,
3458     const struct component_master_ops *ops, struct component_match *match)
3459 {
3460 	struct component *component;
3461 	int found = 0;
3462 	int i, ret;
3463 
3464 	SLIST_FOREACH(component, &component_list, next) {
3465 		for (i = 0; i < match->nmatches; i++) {
3466 			struct component_match_entry *m = &match->match[i];
3467 			if (m->compare(component->dev, m->data)) {
3468 				component->adev = dev;
3469 				found = 1;
3470 				break;
3471 			}
3472 		}
3473 	}
3474 
3475 	if (found) {
3476 		ret = ops->bind(dev);
3477 		if (ret)
3478 			return ret;
3479 	}
3480 
3481 	return 0;
3482 }
3483 
3484 #ifdef __HAVE_FDT
3485 
3486 #include <linux/platform_device.h>
3487 #include <dev/ofw/openfirm.h>
3488 #include <dev/ofw/fdt.h>
3489 #include <machine/fdt.h>
3490 
3491 LIST_HEAD(, platform_device) pdev_list = LIST_HEAD_INITIALIZER(pdev_list);
3492 
3493 void
platform_device_register(struct platform_device * pdev)3494 platform_device_register(struct platform_device *pdev)
3495 {
3496 	int i;
3497 
3498 	pdev->num_resources = pdev->faa->fa_nreg;
3499 	if (pdev->faa->fa_nreg > 0) {
3500 		pdev->resource = mallocarray(pdev->faa->fa_nreg,
3501 		    sizeof(*pdev->resource), M_DEVBUF, M_WAITOK | M_ZERO);
3502 		for (i = 0; i < pdev->faa->fa_nreg; i++) {
3503 			pdev->resource[i].start = pdev->faa->fa_reg[i].addr;
3504 			pdev->resource[i].end = pdev->faa->fa_reg[i].addr +
3505 			    pdev->faa->fa_reg[i].size - 1;
3506 		}
3507 	}
3508 
3509 	pdev->parent = pdev->dev.dv_parent;
3510 	pdev->node = pdev->faa->fa_node;
3511 	pdev->iot = pdev->faa->fa_iot;
3512 	pdev->dmat = pdev->faa->fa_dmat;
3513 	LIST_INSERT_HEAD(&pdev_list, pdev, next);
3514 }
3515 
3516 
3517 struct resource *
platform_get_resource(struct platform_device * pdev,u_int type,u_int num)3518 platform_get_resource(struct platform_device *pdev, u_int type, u_int num)
3519 {
3520 	KASSERT(num < pdev->num_resources);
3521 	return &pdev->resource[num];
3522 }
3523 
3524 void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device * pdev,const char * name)3525 devm_platform_ioremap_resource_byname(struct platform_device *pdev,
3526 				      const char *name)
3527 {
3528 	bus_space_handle_t ioh;
3529 	int err, idx;
3530 
3531 	idx = OF_getindex(pdev->node, name, "reg-names");
3532 	if (idx == -1 || idx >= pdev->num_resources)
3533 		return ERR_PTR(-EINVAL);
3534 
3535 	err = bus_space_map(pdev->iot, pdev->resource[idx].start,
3536 	    pdev->resource[idx].end - pdev->resource[idx].start + 1,
3537 	    BUS_SPACE_MAP_LINEAR, &ioh);
3538 	if (err)
3539 		return ERR_PTR(-err);
3540 
3541 	return bus_space_vaddr(pdev->iot, ioh);
3542 }
3543 
3544 #include <dev/ofw/ofw_clock.h>
3545 #include <linux/clk.h>
3546 
3547 struct clk *
devm_clk_get(struct device * dev,const char * name)3548 devm_clk_get(struct device *dev, const char *name)
3549 {
3550 	struct platform_device *pdev = (struct platform_device *)dev;
3551 	struct clk *clk;
3552 
3553 	clk = malloc(sizeof(*clk), M_DEVBUF, M_WAITOK);
3554 	clk->freq = clock_get_frequency(pdev->node, name);
3555 	return clk;
3556 }
3557 
3558 u_long
clk_get_rate(struct clk * clk)3559 clk_get_rate(struct clk *clk)
3560 {
3561 	return clk->freq;
3562 }
3563 
3564 #include <linux/gpio/consumer.h>
3565 #include <dev/ofw/ofw_gpio.h>
3566 
3567 struct gpio_desc {
3568 	uint32_t gpios[4];
3569 };
3570 
3571 struct gpio_desc *
devm_gpiod_get_optional(struct device * dev,const char * name,int flags)3572 devm_gpiod_get_optional(struct device *dev, const char *name, int flags)
3573 {
3574 	struct platform_device *pdev = (struct platform_device *)dev;
3575 	struct gpio_desc *desc;
3576 	char fullname[128];
3577 	int len;
3578 
3579 	snprintf(fullname, sizeof(fullname), "%s-gpios", name);
3580 
3581 	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);
3582 	len = OF_getpropintarray(pdev->node, fullname, desc->gpios,
3583 	     sizeof(desc->gpios));
3584 	KASSERT(len <= sizeof(desc->gpios));
3585 	if (len < 0) {
3586 		free(desc, M_DEVBUF, sizeof(*desc));
3587 		return NULL;
3588 	}
3589 
3590 	switch (flags) {
3591 	case GPIOD_IN:
3592 		gpio_controller_config_pin(desc->gpios, GPIO_CONFIG_INPUT);
3593 		break;
3594 	case GPIOD_OUT_HIGH:
3595 		gpio_controller_config_pin(desc->gpios, GPIO_CONFIG_OUTPUT);
3596 		gpio_controller_set_pin(desc->gpios, 1);
3597 		break;
3598 	default:
3599 		panic("%s: unimplemented flags 0x%x", __func__, flags);
3600 	}
3601 
3602 	return desc;
3603 }
3604 
3605 int
gpiod_get_value_cansleep(const struct gpio_desc * desc)3606 gpiod_get_value_cansleep(const struct gpio_desc *desc)
3607 {
3608 	return gpio_controller_get_pin(((struct gpio_desc *)desc)->gpios);
3609 }
3610 
3611 struct phy {
3612 	int node;
3613 	const char *name;
3614 };
3615 
3616 struct phy *
devm_phy_optional_get(struct device * dev,const char * name)3617 devm_phy_optional_get(struct device *dev, const char *name)
3618 {
3619 	struct platform_device *pdev = (struct platform_device *)dev;
3620 	struct phy *phy;
3621 	int idx;
3622 
3623 	idx = OF_getindex(pdev->node, name, "phy-names");
3624 	if (idx == -1)
3625 		return NULL;
3626 
3627 	phy = malloc(sizeof(*phy), M_DEVBUF, M_WAITOK);
3628 	phy->node = pdev->node;
3629 	phy->name = name;
3630 
3631 	return phy;
3632 }
3633 
3634 struct bus_type platform_bus_type;
3635 
3636 #include <dev/ofw/ofw_misc.h>
3637 
3638 #include <linux/of.h>
3639 #include <linux/platform_device.h>
3640 
3641 struct device_node *
__of_devnode(void * arg)3642 __of_devnode(void *arg)
3643 {
3644 	struct device *dev = container_of(arg, struct device, of_node);
3645 	struct platform_device *pdev = (struct platform_device *)dev;
3646 
3647 	return (struct device_node *)(uintptr_t)pdev->node;
3648 }
3649 
3650 int
__of_device_is_compatible(struct device_node * np,const char * compatible)3651 __of_device_is_compatible(struct device_node *np, const char *compatible)
3652 {
3653 	return OF_is_compatible((uintptr_t)np, compatible);
3654 }
3655 
3656 int
__of_property_present(struct device_node * np,const char * propname)3657 __of_property_present(struct device_node *np, const char *propname)
3658 {
3659 	return OF_getpropbool((uintptr_t)np, (char *)propname);
3660 }
3661 
3662 int
__of_property_read_variable_u32_array(struct device_node * np,const char * propname,uint32_t * out_values,size_t sz_min,size_t sz_max)3663 __of_property_read_variable_u32_array(struct device_node *np,
3664     const char *propname, uint32_t *out_values, size_t sz_min, size_t sz_max)
3665 {
3666 	int len;
3667 
3668 	len = OF_getpropintarray((uintptr_t)np, (char *)propname, out_values,
3669 	    sz_max * sizeof(*out_values));
3670 	if (len < 0)
3671 		return -EINVAL;
3672 	if (len == 0)
3673 		return -ENODATA;
3674 	if (len < sz_min * sizeof(*out_values) ||
3675 	    len > sz_max * sizeof(*out_values))
3676 		return -EOVERFLOW;
3677 	if (sz_min == 1 && sz_max == 1)
3678 		return 0;
3679 	return len / sizeof(*out_values);
3680 }
3681 
3682 int
__of_property_read_variable_u64_array(struct device_node * np,const char * propname,uint64_t * out_values,size_t sz_min,size_t sz_max)3683 __of_property_read_variable_u64_array(struct device_node *np,
3684     const char *propname, uint64_t *out_values, size_t sz_min, size_t sz_max)
3685 {
3686 	int len;
3687 
3688 	len = OF_getpropint64array((uintptr_t)np, (char *)propname, out_values,
3689 	    sz_max * sizeof(*out_values));
3690 	if (len < 0)
3691 		return -EINVAL;
3692 	if (len == 0)
3693 		return -ENODATA;
3694 	if (len < sz_min * sizeof(*out_values) ||
3695 	    len > sz_max * sizeof(*out_values))
3696 		return -EOVERFLOW;
3697 	if (sz_min == 1 && sz_max == 1)
3698 		return 0;
3699 	return len / sizeof(*out_values);
3700 }
3701 
3702 int
__of_property_match_string(struct device_node * np,const char * propname,const char * str)3703 __of_property_match_string(struct device_node *np,
3704     const char *propname, const char *str)
3705 {
3706 	int idx;
3707 
3708 	idx = OF_getindex((uintptr_t)np, str, propname);
3709 	if (idx == -1)
3710 		return -ENODATA;
3711 	return idx;
3712 }
3713 
3714 struct device_node *
__of_parse_phandle(struct device_node * np,const char * propname,int idx)3715 __of_parse_phandle(struct device_node *np, const char *propname, int idx)
3716 {
3717 	uint32_t phandles[16] = {};
3718 	int len, node;
3719 
3720 	len = OF_getpropintarray((uintptr_t)np, (char *)propname, phandles,
3721 	    sizeof(phandles));
3722 	if (len < (idx + 1) * sizeof(uint32_t))
3723 		return NULL;
3724 
3725 	node = OF_getnodebyphandle(phandles[idx]);
3726 	if (node == 0)
3727 		return NULL;
3728 
3729 	return (struct device_node *)(uintptr_t)node;
3730 }
3731 
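/*
 * Look up entry "idx" in a phandle-plus-arguments property: resolve
 * the phandle to a node and copy the argument cells that follow,
 * using <cellsname> in the target node for the cell count.  Only the
 * first 16 words of the property are examined.
 */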
3732 int
__of_parse_phandle_with_args(struct device_node * np,const char * propname,const char * cellsname,int idx,struct of_phandle_args * args)3733 __of_parse_phandle_with_args(struct device_node *np, const char *propname,
3734     const char *cellsname, int idx, struct of_phandle_args *args)
3735 {
3736 	uint32_t phandles[16] = {};
3737 	int i, len, node;
3738 
3739 	len = OF_getpropintarray((uintptr_t)np, (char *)propname, phandles,
3740 	    sizeof(phandles));
3741 	if (len < (idx + 1) * sizeof(uint32_t))
3742 		return -ENOENT;
3743 
3744 	node = OF_getnodebyphandle(phandles[idx]);
3745 	if (node == 0)
3746 		return -ENOENT;
3747 
3748 	args->np = (struct device_node *)(uintptr_t)node;
3749 	args->args_count = OF_getpropint(node, (char *)cellsname, 0);
3750 	for (i = 0; i < args->args_count; i++)
3751 		args->args[i] = phandles[i + 1];
3752 
3753 	return 0;
3754 }
3755 
3756 int
of_address_to_resource(struct device_node * np,int idx,struct resource * res)3757 of_address_to_resource(struct device_node *np, int idx, struct resource *res)
3758 {
3759 	uint64_t reg[16] = {};
3760 	int len;
3761 
3762 	KASSERT(idx < 8);
3763 
3764 	len = OF_getpropint64array((uintptr_t)np, "reg", reg, sizeof(reg));
3765 	if (len < 0 || idx >= (len / (2 * sizeof(uint64_t))))
3766 		return -EINVAL;
3767 
3768 	res->start = reg[2 * idx];
3769 	res->end = reg[2 * idx] + reg[2 * idx + 1] - 1;
3770 
3771 	return 0;
3772 }
3773 
3774 static int
next_node(int node)3775 next_node(int node)
3776 {
3777 	int peer = OF_peer(node);
3778 
3779 	while (node && !peer) {
3780 		node = OF_parent(node);
3781 		if (node)
3782 			peer = OF_peer(node);
3783 	}
3784 
3785 	return peer;
3786 }
3787 
3788 static int
find_matching_node(int node,const struct of_device_id * id)3789 find_matching_node(int node, const struct of_device_id *id)
3790 {
3791 	int child, match;
3792 	int i;
3793 
3794 	for (child = OF_child(node); child; child = OF_peer(child)) {
3795 		match = find_matching_node(child, id);
3796 		if (match)
3797 			return match;
3798 	}
3799 
3800 	for (i = 0; id[i].compatible; i++) {
3801 		if (OF_is_compatible(node, id[i].compatible))
3802 			return node;
3803 	}
3804 
3805 	return 0;
3806 }
3807 
3808 struct device_node *
__matching_node(struct device_node * np,const struct of_device_id * id)3809 __matching_node(struct device_node *np, const struct of_device_id *id)
3810 {
3811 	int node = OF_peer(0);
3812 	int match;
3813 
3814 	if (np)
3815 		node = next_node((uintptr_t)np);
3816 	while (node) {
3817 		match = find_matching_node(node, id);
3818 		if (match)
3819 			return (struct device_node *)(uintptr_t)match;
3820 		node = next_node(node);
3821 	}
3822 
3823 	return NULL;
3824 }
3825 
3826 struct platform_device *
of_platform_device_create(struct device_node * np,const char * bus_id,struct device * parent)3827 of_platform_device_create(struct device_node *np, const char *bus_id,
3828     struct device *parent)
3829 {
3830 	struct platform_device *pdev;
3831 
3832 	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
3833 	pdev->node = (intptr_t)np;
3834 	pdev->parent = parent;
3835 
3836 	LIST_INSERT_HEAD(&pdev_list, pdev, next);
3837 
3838 	return pdev;
3839 }
3840 
3841 struct platform_device *
of_find_device_by_node(struct device_node * np)3842 of_find_device_by_node(struct device_node *np)
3843 {
3844 	struct platform_device *pdev;
3845 
3846 	LIST_FOREACH(pdev, &pdev_list, next) {
3847 		if (pdev->node == (intptr_t)np)
3848 			return pdev;
3849 	}
3850 
3851 	return NULL;
3852 }
3853 
3854 int
of_device_is_available(struct device_node * np)3855 of_device_is_available(struct device_node *np)
3856 {
3857 	char status[32];
3858 
3859 	if (OF_getprop((uintptr_t)np, "status", status, sizeof(status)) > 0 &&
3860 	    strcmp(status, "disabled") == 0)
3861 		return 0;
3862 
3863 	return 1;
3864 }
3865 
3866 int
of_dma_configure(struct device * dev,struct device_node * np,int force_dma)3867 of_dma_configure(struct device *dev, struct device_node *np, int force_dma)
3868 {
3869 	struct platform_device *pdev = (struct platform_device *)dev;
3870 	bus_dma_tag_t dmat = dma_tag_lookup(pdev->parent);
3871 
3872 	pdev->dmat = iommu_device_map(pdev->node, dmat);
3873 	return 0;
3874 }
3875 
3876 struct device_node *
__of_get_compatible_child(void * p,const char * compat)3877 __of_get_compatible_child(void *p, const char *compat)
3878 {
3879 	struct device *dev = container_of(p, struct device, of_node);
3880 	struct platform_device *pdev = (struct platform_device *)dev;
3881 	int child;
3882 
3883 	for (child = OF_child(pdev->node); child; child = OF_peer(child)) {
3884 		if (OF_is_compatible(child, compat))
3885 			return (struct device_node *)(uintptr_t)child;
3886 	}
3887 	return NULL;
3888 }
3889 
3890 struct device_node *
__of_get_child_by_name(void * p,const char * name)3891 __of_get_child_by_name(void *p, const char *name)
3892 {
3893 	struct device *dev = container_of(p, struct device, of_node);
3894 	struct platform_device *pdev = (struct platform_device *)dev;
3895 	int child;
3896 
3897 	child = OF_getnodebyname(pdev->node, name);
3898 	if (child == 0)
3899 		return NULL;
3900 	return (struct device_node *)(uintptr_t)child;
3901 }
3902 
3903 int
component_compare_of(struct device * dev,void * data)3904 component_compare_of(struct device *dev, void *data)
3905 {
3906 	struct platform_device *pdev = (struct platform_device *)dev;
3907 
3908 	return (pdev->node == (intptr_t)data);
3909 }
3910 
3911 void
drm_of_component_match_add(struct device * master,struct component_match ** matchptr,int (* compare)(struct device *,void *),struct device_node * np)3912 drm_of_component_match_add(struct device *master,
3913 			   struct component_match **matchptr,
3914 			   int (*compare)(struct device *, void *),
3915 			   struct device_node *np)
3916 {
3917 	struct component_match *match = *matchptr;
3918 
3919 	if (match == NULL) {
3920 		match = malloc(sizeof(struct component_match),
3921 		    M_DEVBUF, M_WAITOK | M_ZERO);
3922 		*matchptr = match;
3923 	}
3924 
3925 	KASSERT(match->nmatches < nitems(match->match));
3926 	match->match[match->nmatches].compare = compare;
3927 	match->match[match->nmatches].data = np;
3928 	match->nmatches++;
3929 }
3930 
3931 #endif
3932