/*-
 * Copyright (c) 2009-2021 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * NOTE: Some functions in this file derive directly from the Linux kernel
 * sources and are covered by the GPLv2.
 */

#include <media/v4l2-dev.h>

#include <linux/leds.h>
#include <linux/major.h>
#include <linux/power_supply.h>
#include <linux/dma-buf.h>
#include <linux/rational.h>

#include <dvbdev.h>

static struct timespec ktime_mono_to_real_offset;
static struct timespec ktime_mono_to_uptime_offset;

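/*
 * The functions below are small userspace re-implementations of Linux
 * kernel helper APIs (byte order, unaligned access, atomics, bitmaps,
 * character device and device/driver registration, ktime), built on
 * libc and a global lock (atomic_lock()/atomic_unlock()), so that
 * imported Linux driver code can be compiled largely unchanged.
 */
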
44 int
printk_nop()
46 {
47 	return (1);
48 }
49 
50 void
le16_to_cpus(uint16_t *p)
52 {
53 	uint16_t temp;
54 
55 	/* assuming that the pointer is correctly aligned */
56 
57 	temp = ((uint8_t *)p)[0] | (((uint8_t *)p)[1] << 8);
58 
59 	*p = temp;
60 }
61 
62 void
le32_to_cpus(uint32_t *p)
64 {
65 	uint32_t temp;
66 
67 	/* assuming that the pointer is correctly aligned */
68 
69 	temp = (((uint8_t *)p)[0] | (((uint8_t *)p)[1] << 8) |
70 	    (((uint8_t *)p)[2] << 16) | (((uint8_t *)p)[3] << 24));
71 
72 	*p = temp;
73 }
74 
75 void
le64_to_cpus(uint64_t *p)
77 {
78 	uint64_t temp;
79 
80 	/* assuming that the pointer is correctly aligned */
81 	temp = (((uint8_t *)p)[4] | (((uint8_t *)p)[5] << 8) |
82 	    (((uint8_t *)p)[6] << 16) | (((uint8_t *)p)[7] << 24));
83 	temp <<= 32;
84 	temp |= (((uint8_t *)p)[0] | (((uint8_t *)p)[1] << 8) |
85 	    (((uint8_t *)p)[2] << 16) | (((uint8_t *)p)[3] << 24));
86 
87 	*p = temp;
88 }
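
/*
 * Usage sketch (illustrative only; the variable and buffer names are
 * hypothetical): the le*_to_cpus() helpers convert a little-endian
 * field to host byte order in place.
 *
 *	uint16_t w_value;
 *
 *	memcpy(&w_value, buf, sizeof(w_value));	// raw little-endian bytes
 *	le16_to_cpus(&w_value);			// now in CPU byte order
 */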
89 
90 uint16_t
be16_to_cpup(uint16_t *p)
92 {
93 	return be16_to_cpu(*p);
94 }
95 
96 uint16_t
cpu_to_be16p(uint16_t *p)
98 {
99 	return cpu_to_be16(*p);
100 }
101 
102 
103 uint16_t
le16_to_cpup(uint16_t *p)
105 {
106 	return le16_to_cpu(*p);
107 }
108 
109 uint16_t
cpu_to_le16p(uint16_t *p)
111 {
112 	return cpu_to_le16(*p);
113 }
114 
115 uint32_t
le32_to_cpup(uint32_t *p)
117 {
118 	return le32_to_cpu(*p);
119 }
120 
121 uint32_t
cpu_to_le32p(uint32_t *p)
123 {
124 	return cpu_to_le32(*p);
125 }
126 
127 uint32_t
be32_to_cpup(uint32_t *p)
129 {
130 	return be32_to_cpu(*p);
131 }
132 
133 uint32_t
cpu_to_be32p(uint32_t *p)
135 {
136 	return cpu_to_be32(*p);
137 }
138 
139 uint64_t
le64_to_cpup(uint64_t *p)
141 {
142 	return le64_to_cpu(*p);
143 }
144 
145 uint64_t
cpu_to_le64p(uint64_t *p)
147 {
148 	return cpu_to_le64(*p);
149 }
150 
151 uint64_t
be64_to_cpup(uint64_t *p)
153 {
154 	return be64_to_cpu(*p);
155 }
156 
157 uint64_t
cpu_to_be64p(uint64_t *p)
159 {
160 	return cpu_to_be64(*p);
161 }
162 
163 void
put_unaligned_le64(uint64_t val, void *_ptr)
165 {
166 	uint8_t *ptr = _ptr;
167 
168 	ptr[0] = val;
169 	val >>= 8;
170 	ptr[1] = val;
171 	val >>= 8;
172 	ptr[2] = val;
173 	val >>= 8;
174 	ptr[3] = val;
175 	val >>= 8;
176 	ptr[4] = val;
177 	val >>= 8;
178 	ptr[5] = val;
179 	val >>= 8;
180 	ptr[6] = val;
181 	val >>= 8;
182 	ptr[7] = val;
183 }
184 
185 void
put_unaligned_be64(uint64_t val, void *_ptr)
187 {
188 	uint8_t *ptr = _ptr;
189 
190 	ptr[7] = val;
191 	val >>= 8;
192 	ptr[6] = val;
193 	val >>= 8;
194 	ptr[5] = val;
195 	val >>= 8;
196 	ptr[4] = val;
197 	val >>= 8;
198 	ptr[3] = val;
199 	val >>= 8;
200 	ptr[2] = val;
201 	val >>= 8;
202 	ptr[1] = val;
203 	val >>= 8;
204 	ptr[0] = val;
205 }
206 
207 void
put_unaligned_le32(uint32_t val, void *_ptr)
209 {
210 	uint8_t *ptr = _ptr;
211 
212 	ptr[0] = val;
213 	val >>= 8;
214 	ptr[1] = val;
215 	val >>= 8;
216 	ptr[2] = val;
217 	val >>= 8;
218 	ptr[3] = val;
219 }
220 
221 void
put_unaligned_be32(uint32_t val, void *_ptr)
223 {
224 	uint8_t *ptr = _ptr;
225 
226 	ptr[3] = val;
227 	val >>= 8;
228 	ptr[2] = val;
229 	val >>= 8;
230 	ptr[1] = val;
231 	val >>= 8;
232 	ptr[0] = val;
233 }
234 
235 void
put_unaligned_be16(uint16_t val, void *_ptr)
237 {
238 	uint8_t *ptr = _ptr;
239 
240 	ptr[0] = (val >> 8);
241 	ptr[1] = val;
242 }
243 
244 void
put_unaligned_le16(uint16_t val, void *_ptr)
246 {
247 	uint8_t *ptr = _ptr;
248 
249 	ptr[0] = val;
250 	val >>= 8;
251 	ptr[1] = val;
252 }
253 
254 uint64_t
get_unaligned_le64(const void *_ptr)
256 {
257 	const uint8_t *ptr = _ptr;
258 	uint64_t val;
259 
260 	val = ptr[7];
261 	val <<= 8;
262 	val |= ptr[6];
263 	val <<= 8;
264 	val |= ptr[5];
265 	val <<= 8;
266 	val |= ptr[4];
267 	val <<= 8;
268 	val |= ptr[3];
269 	val <<= 8;
270 	val |= ptr[2];
271 	val <<= 8;
272 	val |= ptr[1];
273 	val <<= 8;
274 	val |= ptr[0];
275 	return (val);
276 }
277 
278 uint64_t
get_unaligned_be64(const void *_ptr)
280 {
281 	const uint8_t *ptr = _ptr;
282 	uint64_t val;
283 
284 	val = ptr[0];
285 	val <<= 8;
286 	val |= ptr[1];
287 	val <<= 8;
288 	val |= ptr[2];
289 	val <<= 8;
290 	val |= ptr[3];
291 	val <<= 8;
292 	val |= ptr[4];
293 	val <<= 8;
294 	val |= ptr[5];
295 	val <<= 8;
296 	val |= ptr[6];
297 	val <<= 8;
298 	val |= ptr[7];
299 	return (val);
300 }
301 
302 uint32_t
get_unaligned_le32(const void *_ptr)
304 {
305 	const uint8_t *ptr = _ptr;
306 	uint32_t val;
307 
308 	val = ptr[3];
309 	val <<= 8;
310 	val |= ptr[2];
311 	val <<= 8;
312 	val |= ptr[1];
313 	val <<= 8;
314 	val |= ptr[0];
315 	return (val);
316 }
317 
318 uint32_t
get_unaligned_be32(const void *_ptr)
320 {
321 	const uint8_t *ptr = _ptr;
322 	uint32_t val;
323 
324 	val = ptr[0];
325 	val <<= 8;
326 	val |= ptr[1];
327 	val <<= 8;
328 	val |= ptr[2];
329 	val <<= 8;
330 	val |= ptr[3];
331 	return (val);
332 }
333 
334 uint16_t
get_unaligned_be16(const void *_ptr)
336 {
337 	const uint8_t *ptr = _ptr;
338 	uint16_t val;
339 
340 	val = ptr[0];
341 	val <<= 8;
342 	val |= ptr[1];
343 	return (val);
344 }
345 
346 uint16_t
get_unaligned_le16(const void *_ptr)
348 {
349 	const uint8_t *ptr = _ptr;
350 	uint16_t val;
351 
352 	val = ptr[1];
353 	val <<= 8;
354 	val |= ptr[0];
355 	return (val);
356 }
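
/*
 * Usage sketch (illustrative only; "wire" is a hypothetical buffer):
 * the put_unaligned_*() and get_unaligned_*() helpers access
 * fixed-endian integers one byte at a time, so the pointer does not
 * need to be aligned.
 *
 *	uint8_t wire[4];
 *
 *	put_unaligned_le32(0x11223344U, wire);
 *	// get_unaligned_le32(wire) now returns 0x11223344U
 */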
357 
358 void   *
devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t gfp)
360 {
361 	return (calloc(n, size));
362 }
363 
364 void   *
devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
366 {
367 	void *ptr;
368 
369 	/*
370 	 * TODO: Register data so that it gets freed
371 	 * when the device is freed.
372 	 */
373 	ptr = malloc(size);
374 	if (ptr != NULL)
375 		memset(ptr, 0, size);
376 	return (ptr);
377 }
378 
379 void   *
devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
381 {
382 	return (malloc(size));
383 }
384 
385 void   *
devm_kmemdup(struct device *dev, const void *data, size_t size, gfp_t gfp)
387 {
388 	void *ptr;
389 
390 	ptr = malloc(size);
391 	if (ptr != NULL)
392 		memcpy(ptr, data, size);
393 	return (ptr);
394 }
395 
396 void   *
devm_kmalloc_array(struct device *dev,
398     size_t n, size_t size, gfp_t flags)
399 {
400 	size_t total = n * size;
401 
402 	if (size != 0 && total / size != n)
403 		return (NULL);		/* overflow */
404 	return (malloc(total));
405 }
406 
407 void
devm_kfree(struct device *dev, void *ptr)
409 {
410 	free(ptr);
411 }
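
/*
 * Note: as the TODO in devm_kzalloc() above indicates, these devm_*()
 * allocators are currently plain malloc()/calloc() wrappers. Nothing
 * registers the memory for automatic release when the device goes
 * away, so allocations must be freed explicitly with devm_kfree() or
 * they are leaked.
 */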
412 
413 int
devm_add_action(struct device *dev, void (*action) (void *), void *data)
415 {
416 	return (0);
417 }
418 
419 int
devm_add_action_or_reset(struct device *dev,
421     void (*action) (void *), void *data)
422 {
423 	int ret;
424 
425 	ret = devm_add_action(dev, action, data);
426 	if (ret)
427 		action(data);
428 
429 	return (ret);
430 }
431 
432 struct clk *
devm_clk_get(struct device *dev, const char *id)
434 {
435 	return (ERR_PTR(-EOPNOTSUPP));
436 }
437 
438 void   *
dev_get_drvdata(const struct device *dev)
440 {
441 	return (dev->driver_data);
442 }
443 
444 void
dev_set_drvdata(struct device *dev, void *data)
446 {
447 	dev->driver_data = data;
448 }
449 
450 const char *
dev_name(const struct device *dev)
452 {
453 	if (dev == NULL)
454 		return ("NULL");
455 	return (dev->name);
456 }
457 
458 int
atomic_add(int i, atomic_t *v)
460 {
461 	atomic_lock();
462 	v->counter += i;
463 	i = v->counter;
464 	atomic_unlock();
465 
466 	return (i);
467 }
468 
469 int
atomic_add_unless(atomic_t *v, int a, int u)
471 {
472 	int c;
473 
474 	atomic_lock();
475 	c = v->counter;
476 	if (c != u)
477 		v->counter += a;
478 	atomic_unlock();
479 
480 	return (c != u);
481 }
482 
483 int
atomic_inc(atomic_t *v)
485 {
486 	int i;
487 
488 	atomic_lock();
489 	v->counter++;
490 	i = v->counter;
491 	atomic_unlock();
492 
493 	return (i);
494 }
495 
496 int
atomic_dec(atomic_t *v)
498 {
499 	int i;
500 
501 	atomic_lock();
502 	v->counter--;
503 	i = v->counter;
504 	atomic_unlock();
505 
506 	return (i);
507 }
508 
509 void
atomic_set(atomic_t *v, int i)
511 {
512 	atomic_lock();
513 	v->counter = i;
514 	atomic_unlock();
515 }
516 
517 int
atomic_read(const atomic_t *v)
519 {
520 	int i;
521 
522 	atomic_lock();
523 	i = v->counter;
524 	atomic_unlock();
525 	return (i);
526 }
527 
528 int
atomic_dec_and_test(atomic_t *v)
530 {
531 	int i;
532 
533 	atomic_lock();
534 	v->counter--;
535 	i = v->counter;
536 	atomic_unlock();
537 
538 	return (i == 0);
539 }
540 
541 int
atomic_cmpxchg(atomic_t *v, int old, int new)
543 {
544 	int prev;
545 
546 	atomic_lock();
547 	prev = v->counter;
548 	if (prev == old)
549 		v->counter = new;
550 	atomic_unlock();
551 	return (prev);
552 }
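
/*
 * Usage sketch (illustrative only; "v", "old" and "new" are
 * hypothetical): atomic_cmpxchg() returns the previous counter value,
 * which allows the usual compare-and-swap retry loop.
 *
 *	atomic_t v;
 *	int old, new;
 *
 *	atomic_set(&v, 0);
 *	do {
 *		old = atomic_read(&v);
 *		new = old + 1;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */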
553 
554 uint64_t
atomic64_read(atomic64_t *v)
556 {
557 	uint64_t value;
558 
559 	atomic_lock();
560 	value = v->counter;
561 	atomic_unlock();
562 	return (value);
563 }
564 
565 void
atomic64_or(uint64_t data, atomic64_t *v)
567 {
568 	atomic_lock();
569 	v->counter |= data;
570 	atomic_unlock();
571 }
572 
573 void
atomic64_xor(uint64_t data, atomic64_t *v)
575 {
576 	atomic_lock();
577 	v->counter ^= data;
578 	atomic_unlock();
579 }
580 
581 void
atomic64_and(uint64_t data, atomic64_t *v)
583 {
584 	atomic_lock();
585 	v->counter &= data;
586 	atomic_unlock();
587 }
588 
589 void
atomic64_andnot(uint64_t data, atomic64_t *v)
591 {
592 	atomic_lock();
593 	v->counter &= ~data;
594 	atomic_unlock();
595 }
596 
597 int
test_bit(int nr, const void *addr)
599 {
600 	unsigned long mask = BIT_MASK(nr);
601 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
602 	int i;
603 
604 	atomic_lock();
605 	i = (*p & mask) ? 1 : 0;
606 	atomic_unlock();
607 
608 	return (i);
609 }
610 
611 int
test_and_set_bit(int nr, volatile unsigned long *addr)
613 {
614 	unsigned long mask = BIT_MASK(nr);
615 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
616 	unsigned long old;
617 
618 	atomic_lock();
619 	old = *p;
620 	*p = old | mask;
621 	atomic_unlock();
622 	return (old & mask) != 0;
623 }
624 
625 int
test_and_clear_bit(int nr, volatile unsigned long *addr)
627 {
628 	unsigned long mask = BIT_MASK(nr);
629 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
630 	unsigned long old;
631 
632 	atomic_lock();
633 	old = *p;
634 	*p = old & ~mask;
635 	atomic_unlock();
636 
637 	return (old & mask) != 0;
638 }
639 
640 void
set_bit(int nr, volatile unsigned long *addr)
642 {
643 	unsigned long mask = BIT_MASK(nr);
644 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
645 
646 	atomic_lock();
647 	*p |= mask;
648 	atomic_unlock();
649 }
650 
651 void
clear_bit(int nr, volatile unsigned long *addr)
653 {
654 	unsigned long mask = BIT_MASK(nr);
655 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
656 
657 	atomic_lock();
658 	*p &= ~mask;
659 	atomic_unlock();
660 }
661 
662 void
change_bit(int nr, volatile unsigned long *addr)
664 {
665 	unsigned long mask = BIT_MASK(nr);
666 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
667 
668 	atomic_lock();
669 	*p ^= mask;
670 	atomic_unlock();
671 }
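
/*
 * Usage sketch (illustrative only; "flags" is a hypothetical bitmap):
 * test_and_set_bit() returns the previous state of the bit, so it can
 * be used to claim a flag exactly once.
 *
 *	static unsigned long flags[BITS_TO_LONGS(8)];
 *
 *	if (test_and_set_bit(0, flags) == 0) {
 *		// first caller: bit 0 was clear and is now set
 *	}
 */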
672 
673 uint8_t
bitrev8(uint8_t a)
675 {
676 	a = ((a & 0x55) << 1) | ((a & 0xAA) >> 1);
677 	a = ((a & 0x33) << 2) | ((a & 0xCC) >> 2);
678 	a = ((a & 0x0F) << 4) | ((a & 0xF0) >> 4);
679 	return (a);
680 }
681 
682 uint16_t
bitrev16(uint16_t a)
684 {
685 	a = ((a & 0x5555) << 1) | ((a & 0xAAAA) >> 1);
686 	a = ((a & 0x3333) << 2) | ((a & 0xCCCC) >> 2);
687 	a = ((a & 0x0F0F) << 4) | ((a & 0xF0F0) >> 4);
688 	a = ((a & 0x00FF) << 8) | ((a & 0xFF00) >> 8);
689 	return (a);
690 }
691 
692 size_t
memweight(const void *ptr, size_t bytes)
694 {
695 	size_t x;
696 	size_t y;
697 
698 	for (x = y = 0; x != bytes; x++) {
699 		y += hweight8(((uint8_t *)ptr)[x]);
700 	}
701 	return (y);
702 }
703 
704 unsigned int
hweight8(unsigned int w)
706 {
707 	unsigned int res = w - ((w >> 1) & 0x55);
708 
709 	res = (res & 0x33) + ((res >> 2) & 0x33);
710 	return (res + (res >> 4)) & 0x0F;
711 }
712 
713 unsigned int
hweight16(unsigned int w)
715 {
716 	unsigned int res = w - ((w >> 1) & 0x5555);
717 
718 	res = (res & 0x3333) + ((res >> 2) & 0x3333);
719 	res = (res + (res >> 4)) & 0x0F0F;
720 	return (res + (res >> 8)) & 0x00FF;
721 }
722 
723 unsigned int
hweight32(unsigned int w)
725 {
726 	unsigned int res = w - ((w >> 1) & 0x55555555);
727 
728 	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
729 	res = (res + (res >> 4)) & 0x0F0F0F0F;
730 	res = res + (res >> 8);
731 	return (res + (res >> 16)) & 0x000000FF;
732 }
733 
734 unsigned long
hweight64(uint64_t w)
736 {
737 	if (sizeof(long) == 4) {
738 		return (hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w));
739 	} else {
740 		uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);
741 
742 		res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
743 		res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
744 		res = res + (res >> 8);
745 		res = res + (res >> 16);
746 		return (res + (res >> 32)) & 0x00000000000000FFul;
747 	}
748 }
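
/*
 * The hweight*() helpers return the number of set bits (population
 * count); for example, hweight8(0xF0) is 4 and hweight32(0x80000001U)
 * is 2. memweight() above sums hweight8() over a byte range.
 */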
749 
750 struct cdev *
cdev_alloc(void)
752 {
753 	struct cdev *cdev;
754 
755 	cdev = malloc(sizeof(*cdev));
756 	if (cdev == NULL)
757 		goto done;
758 
759 	/* initialise cdev */
760 	cdev_init(cdev, NULL);
761 
762 	cdev->is_alloced = 1;
763 
764 done:
765 	return (cdev);
766 }
767 
768 #define	ROCCAT_MAJOR 35
769 #define	LIRC_MAJOR 14
770 #define	EVDEV_MINOR_BASE 64
771 #define	JOYDEV_MINOR_BASE 0
772 
773 #define	SUB_MAX (F_V4B_SUBDEV_MAX * F_V4B_SUBSUBDEV_MAX)
774 
775 static struct cdev *cdev_registry[F_V4B_MAX][SUB_MAX];
776 static uint32_t cdev_mm[F_V4B_MAX][SUB_MAX];
777 
778 static int dvb_swap_fe;
779 
780 static TAILQ_HEAD(, bus_type) bus_type_head = TAILQ_HEAD_INITIALIZER(bus_type_head);
781 static TAILQ_HEAD(, device_driver) device_driver_head = TAILQ_HEAD_INITIALIZER(device_driver_head);
782 
783 module_param_named(dvb_swap_fe, dvb_swap_fe, int, 0644);
784 MODULE_PARM_DESC(dvb_swap_fe, "swap default DVB frontend, 0..3");
785 
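/*
 * cdev_set_device() decodes a Linux dev_t (major/minor pair) and
 * records the cdev in the cdev_registry[][] table, indexed by webcamd
 * device class (F_V4B_*) and sub-device number; cdev_get_device() and
 * cdev_get_mm() below perform the reverse lookup.
 */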
786 static void
cdev_set_device(dev_t mm, struct cdev *cdev)
788 {
789 	uint8_t subdev;
790 	uint8_t id;
791 
792 	switch (mm & 0xFFFF0000U) {
793 	case MKDEV(INPUT_MAJOR, 0):
794 		switch (mm & 0xFFC0) {
795 		case EVDEV_MINOR_BASE:
796 			subdev = mm & 0x3F;
797 			if (subdev >= F_V4B_SUBDEV_MAX)
798 				break;
799 			cdev_registry[F_V4B_EVDEV][subdev] = cdev;
800 			cdev_mm[F_V4B_EVDEV][subdev] = mm;
801 			break;
802 
803 		case JOYDEV_MINOR_BASE:
804 			subdev = mm & 0x3F;
805 			if (subdev >= F_V4B_SUBDEV_MAX)
806 				break;
807 			cdev_registry[F_V4B_JOYDEV][subdev] = cdev;
808 			cdev_mm[F_V4B_JOYDEV][subdev] = mm;
809 			break;
810 		default:
811 			subdev = 0;
812 			goto error;
813 		}
814 		break;
815 
816 	case MKDEV(LIRC_MAJOR, 0):
817 		subdev = mm & 0xFF;
818 		if (subdev >= F_V4B_SUBDEV_MAX)
819 			break;
820 		cdev_registry[F_V4B_LIRC][subdev] = cdev;
821 		cdev_mm[F_V4B_LIRC][subdev] = mm;
822 		break;
823 
824 	case MKDEV(ROCCAT_MAJOR, 0):
825 		subdev = mm & 0xFF;
826 		if (subdev >= F_V4B_SUBDEV_MAX)
827 			break;
828 		cdev_registry[F_V4B_ROCCAT][subdev] = cdev;
829 		cdev_mm[F_V4B_ROCCAT][subdev] = mm;
830 		break;
831 
832 	case MKDEV(VIDEO_MAJOR, 0):
833 		subdev = mm & 0xFF;
834 		if (subdev >= F_V4B_SUBDEV_MAX)
835 			goto error;
836 		cdev_registry[F_V4B_VIDEO][subdev] = cdev;
837 		cdev_mm[F_V4B_VIDEO][subdev] = mm;
838 		break;
839 
840 	case MKDEV(DVB_MAJOR, 0):
841 #define DVB_DEVICE_VIDEO_MINOR 0
842 #define DVB_DEVICE_AUDIO_MINOR 1
843 #define DVB_DEVICE_SEC_MINOR 2
844 #define DVB_DEVICE_FRONTEND_MINOR 3
845 #define DVB_DEVICE_DEMUX_MINOR 4
846 #define DVB_DEVICE_DVR_MINOR 5
847 #define DVB_DEVICE_CA_MINOR 6
848 #define DVB_DEVICE_NET_MINOR 7
849 #define DVB_DEVICE_OSD_MINOR 8
850 		subdev = (mm >> 6) & 0x3FF;
851 		if (subdev >= F_V4B_SUBDEV_MAX)
852 			return;
853 
854 		id = (mm >> 4) & 0x03;
855 
856 		switch (mm & 0xFFFF000FU) {
857 		case MKDEV(DVB_MAJOR, DVB_DEVICE_FRONTEND_MINOR):
858 			id = (id ^ dvb_swap_fe) & 0x03;
859 			break;
860 		default:
861 			break;
862 		}
863 
864 		subdev += F_V4B_SUBDEV_MAX * id;
865 
866 		switch (mm & 0xFFFF000FU) {
867 		case MKDEV(DVB_MAJOR, DVB_DEVICE_AUDIO_MINOR):
868 			cdev_registry[F_V4B_DVB_AUDIO][subdev] = cdev;
869 			cdev_mm[F_V4B_DVB_AUDIO][subdev] = mm;
870 			break;
871 		case MKDEV(DVB_MAJOR, DVB_DEVICE_CA_MINOR):
872 			cdev_registry[F_V4B_DVB_CA][subdev] = cdev;
873 			cdev_mm[F_V4B_DVB_CA][subdev] = mm;
874 			break;
875 		case MKDEV(DVB_MAJOR, DVB_DEVICE_DEMUX_MINOR):
876 			cdev_registry[F_V4B_DVB_DEMUX][subdev] = cdev;
877 			cdev_mm[F_V4B_DVB_DEMUX][subdev] = mm;
878 			break;
879 		case MKDEV(DVB_MAJOR, DVB_DEVICE_DVR_MINOR):
880 			cdev_registry[F_V4B_DVB_DVR][subdev] = cdev;
881 			cdev_mm[F_V4B_DVB_DVR][subdev] = mm;
882 			break;
883 		case MKDEV(DVB_MAJOR, DVB_DEVICE_FRONTEND_MINOR):
884 			cdev_registry[F_V4B_DVB_FRONTEND][subdev] = cdev;
885 			cdev_mm[F_V4B_DVB_FRONTEND][subdev] = mm;
886 			break;
887 		case MKDEV(DVB_MAJOR, DVB_DEVICE_OSD_MINOR):
888 			cdev_registry[F_V4B_DVB_OSD][subdev] = cdev;
889 			cdev_mm[F_V4B_DVB_OSD][subdev] = mm;
890 			break;
891 		case MKDEV(DVB_MAJOR, DVB_DEVICE_SEC_MINOR):
892 			cdev_registry[F_V4B_DVB_SEC][subdev] = cdev;
893 			cdev_mm[F_V4B_DVB_SEC][subdev] = mm;
894 			break;
895 		case MKDEV(DVB_MAJOR, DVB_DEVICE_VIDEO_MINOR):
896 			cdev_registry[F_V4B_DVB_VIDEO][subdev] = cdev;
897 			cdev_mm[F_V4B_DVB_VIDEO][subdev] = mm;
898 			break;
899 		default:
900 			break;		/* silently ignore */
901 		}
902 		break;
903 	default:
904 		subdev = 0;
905 		goto error;
906 	}
907 	return;
908 
909 error:
	printf("Trying to register an unknown "
	    "device (0x%08jx), or the "
	    "subdevice (%d) is too big.\n",
	    (uintmax_t)mm, (int)subdev);
914 	return;
915 }
916 
917 struct cdev *
cdev_get_device(unsigned int f_v4b)
919 {
920 	unsigned int subunit;
921 
922 	subunit = f_v4b % SUB_MAX;
923 
924 	f_v4b /= SUB_MAX;
925 
926 	if (f_v4b >= F_V4B_MAX)
927 		return (NULL);		/* should not happen */
928 
929 	return (cdev_registry[f_v4b][subunit]);
930 }
931 
932 uint32_t
cdev_get_mm(unsigned int f_v4b)
934 {
935 	unsigned int subunit;
936 
937 	subunit = f_v4b % SUB_MAX;
938 
939 	f_v4b /= SUB_MAX;
940 
941 	if (f_v4b >= F_V4B_MAX)
942 		return (0);		/* should not happen */
943 
944 	return (cdev_mm[f_v4b][subunit]);
945 }
946 
947 void
cdev_init(struct cdev *cdev, const struct file_operations *fops)
949 {
950 	memset(cdev, 0, sizeof(*cdev));
951 
952 	cdev->ops = fops;
953 }
954 
955 int
cdev_add(struct cdev *cdev, dev_t mm, unsigned count)
957 {
958 	cdev->mm_start = mm;
959 	cdev->mm_end = mm + count;
960 
961 	while (mm != cdev->mm_end) {
962 		cdev_set_device(mm, cdev);
963 		mm++;
964 	}
965 
966 	return (0);
967 }
968 
969 int
register_chrdev(dev_t mm, const char *desc, const struct file_operations *fops)
971 {
972 	struct cdev *cdev;
973 
974 	switch (mm) {
975 	case INPUT_MAJOR:
976 		cdev = cdev_alloc();
977 		if (cdev == NULL)
978 			goto error;
979 		cdev->ops = fops;
980 		cdev_add(cdev, MKDEV(mm, EVDEV_MINOR_BASE), 32);
981 		break;
982 	default:
983 		goto error;
984 	}
985 	return (0);
986 
987 error:
988 	printf("Cannot register character "
989 	    "device mm=0x%08jx and desc='%s'.\n", (uintmax_t)mm, desc);
990 	return (-1);
991 }
992 
993 int
unregister_chrdev(dev_t mm, const char *desc)
995 {
996 	printf("Cannot unregister character "
997 	    "device mm=0x%08jx and desc='%s'.\n", (uintmax_t)mm, desc);
998 	return (-1);
999 }
1000 
1001 void
cdev_del(struct cdev *cdev)
1003 {
1004 	dev_t mm;
1005 
1006 	if (cdev == NULL)
1007 		return;
1008 
1009 	mm = cdev->mm_start;
1010 
1011 	while (mm != cdev->mm_end) {
1012 		cdev_set_device(mm, NULL);
1013 		mm++;
1014 	}
1015 
1016 	if (cdev->is_alloced)
1017 		free(cdev);
1018 }
1019 
1020 void
cdev_set_parent(struct cdev *p, struct kobject *kobj)
1022 {
1023 	p->kobj.parent = kobj;
1024 }
1025 
1026 int
cdev_device_add(struct cdev *cdev, struct device *dev)
1028 {
1029 	int rc = 0;
1030 
1031 	if (dev->devt) {
1032 		cdev_set_parent(cdev, &dev->kobj);
1033 
1034 		rc = cdev_add(cdev, dev->devt, 1);
1035 		if (rc)
1036 			return (rc);
1037 	}
1038 	rc = device_add(dev);
1039 	if (rc)
1040 		cdev_del(cdev);
1041 
1042 	return (rc);
1043 }
1044 
1045 void
cdev_device_del(struct cdev *cdev, struct device *dev)
1047 {
1048 	device_del(dev);
1049 	if (dev->devt)
1050 		cdev_del(cdev);
1051 }
1052 
1053 void
kref_init(struct kref *kref)
1055 {
1056 	atomic_lock();
1057 	kref->refcount.counter = 1;
1058 	atomic_unlock();
1059 }
1060 
1061 void
kref_get(struct kref *kref)
1063 {
1064 	atomic_inc(&kref->refcount);
1065 }
1066 
1067 int
kref_get_unless_zero(struct kref *kref)
1069 {
1070 	return (atomic_add_unless(&kref->refcount, 1, 0));
1071 }
1072 
1073 int
kref_put(struct kref *kref, void (*release) (struct kref *kref))
1075 {
1076 	if (atomic_dec(&kref->refcount) == 0) {
1077 		release(kref);
1078 		return 1;
1079 	}
1080 	return 0;
1081 }
1082 
1083 struct device *
get_device(struct device *dev)
1085 {
1086 	if (dev)
1087 		kref_get(&dev->refcount);
1088 	return (dev);
1089 }
1090 
1091 static void
dev_release(struct kref *kref)
1093 {
1094 #if 0
1095 	struct device *dev =
1096 	container_of(kref, struct device, refcount);
1097 
1098 	/* TODO */
1099 
1100 	free(dev);
1101 #endif
1102 }
1103 
1104 void
put_device(struct device *dev)
1106 {
1107 	if (dev)
1108 		kref_put(&dev->refcount, &dev_release);
1109 }
1110 
1111 int
device_move(struct device *dev, struct device *new_parent, int how)
1113 {
1114 	if (dev->parent != NULL)
1115 		put_device(dev->parent);
1116 
1117 	dev->parent = new_parent;
1118 
1119 	if (dev->parent != NULL)
1120 		get_device(dev->parent);
1121 
1122 	return (0);
1123 }
1124 
1125 int
driver_register(struct device_driver *drv)
1127 {
1128 	TAILQ_INSERT_TAIL(&device_driver_head, drv, entry);
1129 	return (0);
1130 }
1131 
1132 int
driver_unregister(struct device_driver *drv)
1134 {
1135 	if (drv->entry.tqe_prev == NULL)
1136 		return (-EINVAL);
1137 	TAILQ_REMOVE(&device_driver_head, drv, entry);
1138 	drv->entry.tqe_prev = NULL;
1139 	return (0);
1140 }
1141 
1142 int
bus_register(struct bus_type *bus)
1144 {
1145 	TAILQ_INSERT_TAIL(&bus_type_head, bus, entry);
1146 	return (0);
1147 }
1148 
1149 int
bus_unregister(struct bus_type *bus)
1151 {
1152 	if (bus->entry.tqe_prev == NULL)
1153 		return (-EINVAL);
1154 	TAILQ_REMOVE(&bus_type_head, bus, entry);
1155 	bus->entry.tqe_prev = NULL;
1156 	return (0);
1157 }
1158 
1159 int
device_add(struct device *dev)
1161 {
1162 	struct device_driver *drv;
1163 
1164 	if (dev->bus == NULL) {
1165 		get_device(dev);
1166 		return (0);
1167 	}
1168 	TAILQ_FOREACH(drv, &device_driver_head, entry) {
1169 		if (drv->bus != dev->bus)
1170 			continue;
1171 
1172 		dev->driver = drv;
1173 
1174 		if (dev->bus->match != NULL) {
1175 			if (dev->bus->match(dev, drv) == 0)
1176 				continue;
1177 		}
1178 		if (dev->bus->probe != NULL) {
1179 			if (dev->bus->probe(dev))
1180 				continue;
1181 		}
1182 		get_device(dev);
1183 		return (0);
1184 	}
1185 
1186 	dev->driver = NULL;
1187 	return (-ENXIO);
1188 }
1189 
1190 void
device_del(struct device *dev)
1192 {
1193 	if (dev->bus != NULL && dev->bus->remove != NULL) {
1194 		dev->bus->remove(dev);
1195 	}
1196 	dev->driver = NULL;
1197 	put_device(dev);
1198 }
1199 
1200 int
device_register(struct device *dev)
1202 {
1203 	return (device_add(dev));
1204 }
1205 
1206 void
device_unregister(struct device *dev)
1208 {
1209 	device_del(dev);
1210 }
1211 
1212 struct device *
device_create_vargs(struct class *class, struct device *parent,
1214     dev_t devt, void *drvdata, const char *fmt, va_list args)
1215 {
1216 	struct device *dev = NULL;
1217 	int retval = -ENODEV;
1218 
1219 	if (class == NULL || IS_ERR(class))
1220 		goto error;
1221 
1222 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1223 	if (!dev) {
1224 		retval = -ENOMEM;
1225 		goto error;
1226 	}
1227 	dev->driver_static.name = "webcamd";
1228 
1229 	/* set a default device name */
1230 	if (class != NULL && class->name != NULL)
1231 		snprintf(dev->name, sizeof(dev->name), "webcamd.%s", class->name);
1232 
1233 	dev->devt = devt;
1234 	dev->class = class;
1235 	dev->parent = parent;
1236 	dev->driver = &dev->driver_static;
1237 	dev_set_drvdata(dev, drvdata);
1238 
1239 	vsnprintf(dev->bus_id, BUS_ID_SIZE, fmt, args);
1240 	retval = device_register(dev);
1241 	if (retval)
1242 		goto error;
1243 
1244 	return dev;
1245 
1246 error:
1247 	put_device(dev);
1248 	return ERR_PTR(retval);
1249 }
1250 
1251 struct device *
device_create(struct class *class, struct device *parent,
1253     dev_t devt, void *drvdata, const char *fmt,...)
1254 {
1255 	va_list vargs;
1256 	struct device *dev;
1257 
1258 	va_start(vargs, fmt);
1259 	dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
1260 	va_end(vargs);
1261 	return dev;
1262 }
1263 
1264 int
device_enable_async_suspend(struct device *dev)
1266 {
1267 	return (0);
1268 }
1269 
1270 void
device_destroy(struct class *class, dev_t devt)
1272 {
1273 
1274 }
1275 
1276 void
module_put(struct module *module)
1278 {
1279 
1280 }
1281 
1282 int
try_module_get(struct module *module)
1284 {
1285 	return (1);
1286 }
1287 
1288 void
module_get(struct module *module)
1290 {
1291 
1292 }
1293 
1294 void   *
ERR_PTR(long error)
1296 {
1297 	return ((void *)error);
1298 }
1299 
1300 long
PTR_ERR(const void *ptr)
1302 {
1303 	return ((long)ptr);
1304 }
1305 
1306 long
IS_ERR(const void *ptr)
1308 {
1309 	return IS_ERR_VALUE((unsigned long)ptr);
1310 }
1311 
1312 int
PTR_ERR_OR_ZERO(const void *ptr)
1314 {
1315 	if (IS_ERR(ptr))
1316 		return PTR_ERR(ptr);
1317 	else
1318 		return 0;
1319 }
1320 
1321 int
__ffs(int x)
1323 {
1324 	return (~(x - 1) & x);
1325 }
1326 
1327 int
__ffz(int x)
1329 {
1330 	return ((x + 1) & ~x);
1331 }
1332 
1333 int
fls(int mask)
1335 {
1336 	int bit;
1337 
1338 	if (mask == 0)
1339 		return (0);
1340 	bit = 1;
1341 	if (mask & 0xFFFF0000U) {
1342 		bit += 16;
1343 		mask = (unsigned int)mask >> 16;
1344 	}
1345 	if (mask & 0xFF00U) {
1346 		bit += 8;
1347 		mask = (unsigned int)mask >> 8;
1348 	}
1349 	if (mask & 0xF0U) {
1350 		bit += 4;
1351 		mask = (unsigned int)mask >> 4;
1352 	}
1353 	if (mask & 0xCU) {
1354 		bit += 2;
1355 		mask = (unsigned int)mask >> 2;
1356 	}
1357 	if (mask & 0x2U) {
1358 		bit += 1;
1359 		mask = (unsigned int)mask >> 1;
1360 	}
1361 	return (bit);
1362 }
1363 
1364 static unsigned long
__flsl(unsigned long mask)
1366 {
1367 	int bit;
1368 
1369 	if (mask == 0)
1370 		return (0);
1371 	bit = 1;
1372 #if (BITS_PER_LONG > 32)
1373 	if (mask & 0xFFFFFFFF00000000UL) {
1374 		bit += 32;
1375 		mask = (unsigned long)mask >> 32;
1376 	}
1377 #endif
1378 	if (mask & 0xFFFF0000UL) {
1379 		bit += 16;
1380 		mask = (unsigned long)mask >> 16;
1381 	}
1382 	if (mask & 0xFF00UL) {
1383 		bit += 8;
1384 		mask = (unsigned long)mask >> 8;
1385 	}
1386 	if (mask & 0xF0UL) {
1387 		bit += 4;
1388 		mask = (unsigned long)mask >> 4;
1389 	}
1390 	if (mask & 0xCUL) {
1391 		bit += 2;
1392 		mask = (unsigned long)mask >> 2;
1393 	}
1394 	if (mask & 0x2UL) {
1395 		bit += 1;
1396 		mask = (unsigned long)mask >> 1;
1397 	}
1398 	return (bit);
1399 }
1400 
1401 unsigned long
find_next_bit(const unsigned long *addr, unsigned long size,
1403     unsigned long offset)
1404 {
1405 	const unsigned long mm = BITS_PER_LONG - 1;
1406 	unsigned long mask = BIT_MASK(offset) - 1UL;
1407 
1408 	while (offset < size) {
1409 		mask = addr[BIT_WORD(offset)] & ~mask;
1410 		if (mask != 0) {
1411 			offset = (ffsl(mask) - 1) | (offset & ~mm);
1412 			break;
1413 		}
1414 		offset += ((~offset) & mm) + 1;
1415 	}
1416 	if (offset > size)
1417 		offset = size;
1418 	return (offset);
1419 }
1420 
1421 unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
1423     unsigned long offset)
1424 {
1425 	const unsigned long mm = BITS_PER_LONG - 1;
1426 	unsigned long mask = BIT_MASK(offset) - 1UL;
1427 
1428 	while (offset < size) {
1429 		mask = (~addr[BIT_WORD(offset)]) & ~mask;
1430 		if (mask != 0) {
1431 			offset = (ffsl(mask) - 1) | (offset & ~mm);
1432 			break;
1433 		}
1434 		offset += ((~offset) & mm) + 1;
1435 	}
1436 	if (offset > size)
1437 		offset = size;
1438 	return (offset);
1439 }
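
/*
 * Usage sketch (illustrative only; "map" and "nbits" are
 * hypothetical): walking the set bits of a bitmap with
 * find_next_bit(), similar to the kernel's for_each_set_bit().
 *
 *	unsigned long i;
 *
 *	for (i = find_next_bit(map, nbits, 0); i < nbits;
 *	    i = find_next_bit(map, nbits, i + 1)) {
 *		// bit "i" is set
 *	}
 */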
1440 
1441 unsigned long *
bitmap_alloc(unsigned int nbits, gfp_t flags)
1443 {
1444 	return (malloc(BITS_TO_LONGS(nbits) * sizeof(long)));
1445 }
1446 
1447 unsigned long *
bitmap_zalloc(unsigned int nbits, gfp_t flags)
1449 {
1450 	return (calloc(BITS_TO_LONGS(nbits), sizeof(long)));
1451 }
1452 
1453 void
bitmap_free(unsigned long *ptr)
1455 {
1456 	free(ptr);
1457 }
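
/*
 * Usage sketch (illustrative only; the size of 64 bits is arbitrary):
 * typical life cycle of a bitmap managed with the helpers above.
 *
 *	unsigned long *map = bitmap_zalloc(64, GFP_KERNEL);
 *
 *	if (map != NULL) {
 *		set_bit(3, map);
 *		// bitmap_weight(map, 64) == 1
 *		bitmap_free(map);
 *	}
 */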
1458 
1459 void
bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
1461 {
1462 	const size_t len = BITS_TO_LONGS(nbits) * sizeof(long);
1463 
1464 	memcpy(dst, src, len);
1465 }
1466 
1467 int
bitmap_weight(const unsigned long *src, unsigned nbits)
1469 {
1470 	unsigned x;
1471 	unsigned y;
1472 
1473 	for (x = y = 0; x != nbits; x++) {
1474 		if (src[x / BITS_PER_LONG] & BIT_MASK(x))
1475 			y++;
1476 	}
1477 	return (y);
1478 }
1479 
1480 int
bitmap_andnot(unsigned long *dst, const unsigned long *b1,
1482     const unsigned long *b2, int nbits)
1483 {
1484 	int len = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
1485 	long retval = 0;
1486 	long temp;
1487 	int n;
1488 
1489 	for (n = 0; n != len; n++) {
1490 		temp = b1[n] & ~b2[n];
1491 		dst[n] = temp;
1492 		retval |= temp;
1493 	}
1494 	return (retval != 0);
1495 }
1496 
1497 int
bitmap_and(unsigned long *dst, const unsigned long *b1,
1499     const unsigned long *b2, int nbits)
1500 {
1501 	int len = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
1502 	long retval = 0;
1503 	long temp;
1504 	int n;
1505 
1506 	for (n = 0; n != len; n++) {
1507 		temp = b1[n] & b2[n];
1508 		dst[n] = temp;
1509 		retval |= temp;
1510 	}
1511 	return (retval != 0);
1512 }
1513 
1514 void
bitmap_or(unsigned long *dst, const unsigned long *b1,
1516     const unsigned long *b2, int nbits)
1517 {
1518 	int len = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
1519 	long temp;
1520 	int n;
1521 
1522 	for (n = 0; n != len; n++) {
1523 		temp = b1[n] | b2[n];
1524 		dst[n] = temp;
1525 	}
1526 }
1527 
1528 void
bitmap_xor(unsigned long *dst, const unsigned long *b1,
1530     const unsigned long *b2, int nbits)
1531 {
1532 	int len = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;
1533 	long temp;
1534 	int n;
1535 
1536 	for (n = 0; n != len; n++) {
		temp = b1[n] ^ b2[n];
1538 		dst[n] = temp;
1539 	}
1540 }
1541 
1542 void
bitmap_fill(unsigned long *dst, unsigned int nbits)
1544 {
1545   	const size_t len = BITS_TO_LONGS(nbits) * sizeof(long);
1546 
1547 	memset(dst, 255, len);
1548 }
1549 
1550 void
bitmap_zero(unsigned long *dst, unsigned int nbits)
1552 {
1553   	const size_t len = BITS_TO_LONGS(nbits) * sizeof(long);
1554 
1555 	memset(dst, 0, len);
1556 }
1557 
1558 int
bitmap_subset(const unsigned long *pa, const unsigned long *pb, int nbits)
1560 {
1561 	int end = nbits / BITS_PER_LONG;
1562 	int x;
1563 
1564 	for (x = 0; x != end; x++) {
1565 		if (pa[x] & ~pb[x])
1566 			return (0);
1567 	}
1568 
1569 	x = nbits % BITS_PER_LONG;
1570 	if (x) {
1571 		if (pa[end] & ~pb[end] & ((1ULL << x) - 1ULL))
1572 			return (0);
1573 	}
1574 	return (1);
1575 }
1576 
1577 int
bitmap_full(const unsigned long *bitmap, int bits)
1579 {
1580 	int k;
1581 	int lim = bits / BITS_PER_LONG;
1582 
1583 	for (k = 0; k < lim; ++k)
1584 		if (~bitmap[k])
1585 			return (0);
1586 
1587 	lim = bits % BITS_PER_LONG;
1588 	if (lim) {
1589 		if ((~bitmap[k]) & ((1ULL << lim) - 1ULL))
1590 			return (0);
1591 	}
1592 	return (1);
1593 }
1594 
1595 void
bitmap_clear(unsigned long *map, int start, int nr)
1597 {
1598 	unsigned long *p = map + (start / BITS_PER_LONG);
1599 	int size = start + nr;
1600 	int rem = start % BITS_PER_LONG;
1601 	int bits_to_clear = BITS_PER_LONG - rem;
1602 	unsigned long mask_to_clear = ((1ULL << rem) - 1ULL);
1603 
1604 	while (nr - bits_to_clear >= 0) {
1605 		*p &= ~mask_to_clear;
1606 		nr -= bits_to_clear;
1607 		bits_to_clear = BITS_PER_LONG;
1608 		mask_to_clear = ~0UL;
1609 		p++;
1610 	}
1611 	if (nr) {
1612 		size = size % BITS_PER_LONG;
1613 		mask_to_clear &= ((1ULL << size) - 1ULL);
1614 		*p &= ~mask_to_clear;
1615 	}
1616 }
1617 
1618 void
bitmap_shift_right(unsigned long *dst, const unsigned long *src, int n, int nbits)
1620 {
1621 	int x;
1622 	int y;
1623 
1624 	for (x = 0; x < (nbits - n); x++) {
1625 		y = x + n;
1626 		if (src[y / BITS_PER_LONG] & BIT_MASK(y))
1627 			dst[x / BITS_PER_LONG] |= BIT_MASK(x);
1628 		else
1629 			dst[x / BITS_PER_LONG] &= ~BIT_MASK(x);
1630 	}
1631 	for (; x < nbits; x++)
1632 		dst[x / BITS_PER_LONG] &= ~BIT_MASK(x);
1633 }
1634 
1635 void
bitmap_shift_left(unsigned long *dst, const unsigned long *src, int n, int nbits)
1637 {
1638 	int x;
1639 	int y;
1640 
1641 	for (x = 0; x != n; x++)
1642 		dst[x / BITS_PER_LONG] &= ~BIT_MASK(x);
1643 
1644 	for (; x < nbits; x++) {
1645 		y = x - n;
1646 		if (src[y / BITS_PER_LONG] & BIT_MASK(y))
1647 			dst[x / BITS_PER_LONG] |= BIT_MASK(x);
1648 		else
1649 			dst[x / BITS_PER_LONG] &= ~BIT_MASK(x);
1650 	}
1651 }
1652 
1653 int
bitmap_equal(const unsigned long *pa,
1655     const unsigned long *pb, unsigned bits)
1656 {
1657 	unsigned k;
1658 	unsigned lim = bits / BITS_PER_LONG;
1659 
1660 	for (k = 0; k != lim; k++)
1661 		if (pa[k] != pb[k])
1662 			return (0);
1663 
1664 	bits %= BITS_PER_LONG;
1665 	for (lim = 0; lim != bits; lim++) {
1666 		if ((pa[k] ^ pb[k]) & BIT_MASK(lim))
1667 			return (0);
1668 	}
1669 	return (1);
1670 }
1671 
1672 int
bitmap_empty(const unsigned long *pa, unsigned bits)
1674 {
1675 	unsigned k;
1676 	unsigned lim = bits / BITS_PER_LONG;
1677 
1678 	for (k = 0; k != lim; k++)
1679 		if (pa[k] != 0)
1680 			return (0);
1681 
1682 	bits %= BITS_PER_LONG;
1683 	for (lim = 0; lim != bits; lim++) {
1684 		if (pa[k] & BIT_MASK(lim))
1685 			return (0);
1686 	}
1687 	return (1);
1688 }
1689 
1690 int
bitmap_intersects(const unsigned long *pa,
1692     const unsigned long *pb, unsigned bits)
1693 {
1694 	unsigned k;
1695 	unsigned lim = bits / BITS_PER_LONG;
1696 
1697 	for (k = 0; k != lim; k++)
1698 		if (pa[k] & pb[k])
1699 			return (1);
1700 
1701 	bits %= BITS_PER_LONG;
1702 	for (lim = 0; lim != bits; lim++) {
1703 		if ((pa[k] & pb[k]) & BIT_MASK(lim))
1704 			return (1);
1705 	}
1706 	return (0);
1707 }
1708 
1709 /*
1710  * A fast, small, non-recursive O(nlog n) sort for the Linux kernel
1711  *
1712  * Jan 23 2005  Matt Mackall <mpm@selenic.com>
1713  */
1714 
1715 static void
u32_swap(void *a, void *b, int size)
1717 {
1718 	u32 t = *(u32 *) a;
1719 
1720 	*(u32 *) a = *(u32 *) b;
1721 	*(u32 *) b = t;
1722 }
1723 
1724 static void
generic_swap(void *a, void *b, int size)
1726 {
1727 	char t;
1728 
1729 	do {
1730 		t = *(char *)a;
1731 		*(char *)a++ = *(char *)b;
1732 		*(char *)b++ = t;
1733 	} while (--size > 0);
1734 }
1735 
1736 void
sort(void *base, size_t num, size_t size,
1738     int (*cmp) (const void *, const void *),
1739     void (*swap_fn) (void *, void *, int size))
1740 {
1741 	/* pre-scale counters for performance */
1742 	int i = (num / 2 - 1) * size, n = num * size, c, r;
1743 
1744 	if (!swap_fn)
1745 		swap_fn = (size == 4 ? u32_swap : generic_swap);
1746 
1747 	/* heapify */
1748 	for (; i >= 0; i -= size) {
1749 		for (r = i; r * 2 + size < n; r = c) {
1750 			c = r * 2 + size;
1751 			if (c < n - size && cmp(base + c, base + c + size) < 0)
1752 				c += size;
1753 			if (cmp(base + r, base + c) >= 0)
1754 				break;
1755 			swap_fn(base + r, base + c, size);
1756 		}
1757 	}
1758 
1759 	/* sort */
1760 	for (i = n - size; i > 0; i -= size) {
1761 		swap_fn(base, base + i, size);
1762 		for (r = 0; r * 2 + size < i; r = c) {
1763 			c = r * 2 + size;
1764 			if (c < i - size && cmp(base + c, base + c + size) < 0)
1765 				c += size;
1766 			if (cmp(base + r, base + c) >= 0)
1767 				break;
1768 			swap_fn(base + r, base + c, size);
1769 		}
1770 	}
1771 }
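
/*
 * Usage sketch (illustrative only; the callback and array are
 * hypothetical): sorting an integer array with the heapsort above.
 * Passing NULL as swap_fn selects the built-in 32-bit or generic swap.
 *
 *	static int
 *	cmp_int(const void *a, const void *b)
 *	{
 *		return (*(const int *)a - *(const int *)b);
 *	}
 *
 *	int values[4] = { 3, 1, 4, 1 };
 *
 *	sort(values, 4, sizeof(values[0]), &cmp_int, NULL);
 */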
1772 
1773 /* standard CRC computation */
1774 
1775 u32
crc32_le(u32 crc, unsigned char const *p, size_t len)
1777 {
1778 	uint8_t i;
1779 
1780 	while (len--) {
1781 		crc ^= *p++;
1782 		for (i = 0; i != 8; i++)
1783 			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
1784 	}
1785 	return crc;
1786 }
1787 
1788 u32
crc32_be(u32 crc, unsigned char const *p, size_t len)
1790 {
1791 	uint8_t i;
1792 
1793 	while (len--) {
1794 		crc ^= *p++ << 24;
1795 		for (i = 0; i != 8; i++)
1796 			crc =
1797 			    (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
1798 	}
1799 	return crc;
1800 }
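
/*
 * These are plain bit-at-a-time CRC-32 routines over the CRCPOLY_LE
 * and CRCPOLY_BE polynomials defined elsewhere in this port; as with
 * the Linux originals, the caller supplies the initial CRC value and
 * performs any final inversion itself.
 */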
1801 
1802 void   *
kcalloc(size_t n, size_t size, int flags)
1804 {
1805 	void *ptr;
1806 
1807 	ptr = malloc(size * n);
1808 	if (ptr != NULL)
1809 		memset(ptr, 0, size * n);
1810 
1811 	return (ptr);
1812 }
1813 
1814 void   *
vmalloc(size_t size)
1816 {
1817 	return (malloc_vm(size));
1818 }
1819 
1820 void   *
vzalloc(size_t size)
1822 {
1823 	void *ptr = malloc_vm(size);
1824 
1825 	if (ptr != NULL)
1826 		memset(ptr, 0, size);
1827 
1828 	return (ptr);
1829 }
1830 
1831 long
__get_free_page(int flags)
1833 {
1834 	return ((long)malloc(PAGE_SIZE));
1835 }
1836 
1837 void
free_page(long ptr)
1839 {
1840 	free((void *)ptr);
1841 }
1842 
1843 struct class *
class_get(struct class *class)
1845 {
1846 	if (class)
1847 		kref_get(&class->refcount);
1848 	return (class);
1849 }
1850 
1851 static void
class_release(struct kref *kref)
1853 {
1854 	/* TODO */
1855 
1856 }
1857 
1858 struct class *
class_put(struct class *class)
1860 {
1861 	if (class)
1862 		kref_put(&class->refcount, class_release);
1863 	return (class);
1864 }
1865 
1866 int
class_register(struct class *class)
1868 {
1869 	return (0);
1870 }
1871 
1872 void
class_unregister(struct class *class)
1874 {
1875 
1876 }
1877 
1878 void
class_destroy(struct class *class)
1880 {
1881 	if ((class == NULL) || (IS_ERR(class)))
1882 		return;
1883 	class_unregister(class);
1884 }
1885 
1886 int
alloc_chrdev_region(dev_t *pdev, unsigned basemin, unsigned count, const char *name)
1888 {
1889 	if (strcmp(name, "lirc") == 0) {
1890 		*pdev = MKDEV(LIRC_MAJOR, basemin);
1891 		return (0);
1892 	} else if (strcmp(name, "roccat") == 0) {
1893 		*pdev = MKDEV(ROCCAT_MAJOR, basemin);
1894 		return (0);
1895 	}
1896 	printf("alloc_chrdev_region: Unknown region name: '%s'\n", name);
1897 	return (-ENOMEM);
1898 }
1899 
1900 int
register_chrdev_region(dev_t from, unsigned count, const char *name)
1902 {
1903 	return (0);
1904 }
1905 
1906 void
unregister_chrdev_region(dev_t from, unsigned count)
1908 {
1909 	return;
1910 }
1911 
1912 int
remap_pfn_range(struct vm_area_struct *vma, unsigned long start,
1914     unsigned long page, unsigned long size, int prot)
1915 {
1916 	/* assuming that pages are virtually contiguous */
1917 	if (start == vma->vm_start)
1918 		vma->vm_buffer_address = (void *)page;
1919 
1920 	return (0);
1921 }
1922 
1923 int
vm_insert_page(struct vm_area_struct *vma,
1925     unsigned long start, struct page *page)
1926 {
1927 	/* assuming that pages are virtually contiguous */
1928 	if (start == vma->vm_start)
1929 		vma->vm_buffer_address = (void *)page;
1930 
1931 	return (0);
1932 }
1933 
1934 int
remap_vmalloc_range(struct vm_area_struct *vma,
1936     void *addr, unsigned long pgoff)
1937 {
1938 	addr = (uint8_t *)addr + (pgoff << PAGE_SHIFT);
1939 	vma->vm_buffer_address = addr;
1940 
1941 	return (0);
1942 }
1943 
1944 void
jiffies_to_timeval(uint64_t j, struct timeval *tv)
1946 {
1947 	tv->tv_usec = ((j % 1000ULL) * 1000ULL);
1948 	tv->tv_sec = j / 1000ULL;
1949 }
1950 
1951 uint64_t
round_jiffies_relative(uint64_t j)
1953 {
1954 	return (j);
1955 }
1956 
1957 uint64_t
sched_clock(void)
1959 {
1960 	return (jiffies * (NSEC_PER_SEC / HZ));
1961 }
1962 
1963 int
do_gettimeofday(struct timeval *tp)
1965 {
1966 	return (gettimeofday(tp, NULL));
1967 }
1968 
1969 void
poll_initwait(struct poll_wqueues *pwq)
1971 {
1972 	memset(pwq, 0, sizeof(*pwq));
1973 }
1974 
1975 void
poll_freewait(struct poll_wqueues *pwq)
1977 {
1978 
1979 }
1980 
1981 void
poll_schedule(struct poll_wqueues *pwq, int flag)
1983 {
1984 	schedule();
1985 }
1986 
1987 int32_t
div_round_closest_s32(int32_t rem, int32_t div)
1989 {
1990 	return ((rem + (div / 2)) / div);
1991 }
1992 
1993 uint32_t
div_round_closest_u32(uint32_t rem, uint32_t div)
1995 {
1996 	return ((rem + (div / 2)) / div);
1997 }
1998 
1999 int64_t
div_round_closest_s64(int64_t rem, int64_t div)
2001 {
2002 	return ((rem + (div / 2)) / div);
2003 }
2004 
2005 uint64_t
div_round_closest_u64(uint64_t rem, uint64_t div)
2007 {
2008 	return ((rem + (div / 2)) / div);
2009 }
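
/*
 * The div_round_closest_*() helpers round to the nearest integer
 * instead of truncating; for example, div_round_closest_u32(7, 3)
 * is 2 and div_round_closest_u32(8, 3) is 3.
 */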
2010 
2011 int64_t
div_s64_rem(int64_t dividend, int32_t divisor, int32_t *remainder)
2013 {
2014 	*remainder = dividend % divisor;
2015 	return (dividend / divisor);
2016 }
2017 
2018 struct timespec
ktime_mono_to_real(struct timespec arg)
2020 {
2021 	return (ktime_add(arg, ktime_mono_to_real_offset));
2022 }
2023 
2024 struct timespec
ktime_get_boottime(void)
2026 {
2027 	struct timespec ts;
2028 
2029 	clock_gettime(CLOCK_UPTIME_FAST, &ts);
2030 
2031 	return (ts);
2032 }
2033 
2034 struct timespec
ktime_get_real(void)
2036 {
2037 	struct timespec ts;
2038 
2039 	clock_gettime(CLOCK_REALTIME_FAST, &ts);
2040 
2041 	return (ts);
2042 }
2043 
2044 struct timespec
ktime_get(void)
2046 {
2047 	struct timespec ts;
2048 
2049 	clock_gettime(CLOCK_REALTIME_FAST, &ts);
2050 
2051 	return (ts);
2052 }
2053 
2054 int64_t
ktime_get_ns(void)
2056 {
2057 	return (ktime_to_ns(ktime_get()));
2058 }
2059 
2060 struct timespec
ktime_mono_to_any(struct timespec arg, int off)
2062 {
2063 	switch (off) {
2064 	case TK_OFFS_REAL:
2065 		return (ktime_add(arg, ktime_mono_to_real_offset));
2066 	case TK_OFFS_BOOT:
2067 		return (ktime_add(arg, ktime_mono_to_uptime_offset));
2068 	default:
2069 		printf("Unknown clock conversion\n");
2070 		return (arg);
2071 	}
2072 }
2073 
2074 struct timeval
ktime_to_timeval(const struct timespec ts)
2076 {
2077 	struct timeval tv;
2078 
2079 	tv.tv_sec = ts.tv_sec;
2080 	tv.tv_usec = ts.tv_nsec / 1000;
2081 
2082 	return (tv);
2083 }
2084 
2085 void
ktime_get_ts(struct timespec *ts)
2087 {
2088 	clock_gettime(CLOCK_MONOTONIC_FAST, ts);
2089 }
2090 
2091 void
ktime_get_real_ts(struct timespec *ts)
2093 {
2094 	clock_gettime(CLOCK_REALTIME_FAST, ts);
2095 }
2096 
2097 int64_t
ktime_to_ns(const struct timespec ts)
2099 {
2100 	return ((((int64_t)ts.tv_sec) *
2101 	    (int64_t)1000000000L) + (int64_t)ts.tv_nsec);
2102 }
2103 
2104 struct timespec
ktime_sub(const struct timespec a, const struct timespec b)
2106 {
2107 	struct timespec r;
2108 
2109 	/* do subtraction */
2110 	r.tv_sec = a.tv_sec - b.tv_sec;
2111 	r.tv_nsec = a.tv_nsec - b.tv_nsec;
2112 
2113 	/* carry */
2114 	if (r.tv_nsec < 0) {
2115 		r.tv_nsec += 1000000000LL;
2116 		r.tv_sec--;
2117 	}
2118 	return (r);
2119 }
2120 
2121 struct timespec
ktime_add(const struct timespec a, const struct timespec b)
2123 {
2124 	struct timespec r;
2125 
	/* do addition */
2127 	r.tv_sec = a.tv_sec + b.tv_sec;
2128 	r.tv_nsec = a.tv_nsec + b.tv_nsec;
2129 
2130 	/* carry */
2131 	if (r.tv_nsec >= 1000000000LL) {
2132 		r.tv_nsec -= 1000000000LL;
2133 		r.tv_sec++;
2134 	}
2135 	return (r);
2136 }
2137 
2138 static int
ktime_monotonic_offset_init(void)
2140 {
2141 	struct timespec ta;
2142 	struct timespec tb;
2143 	struct timespec tc;
2144 
2145 	clock_gettime(CLOCK_MONOTONIC, &ta);
2146 	clock_gettime(CLOCK_REALTIME, &tb);
2147 	clock_gettime(CLOCK_UPTIME, &tc);
2148 
2149 	ktime_mono_to_real_offset = ktime_sub(tb, ta);
2150 	ktime_mono_to_uptime_offset = ktime_sub(tc, ta);
2151 
2152 	return (0);
2153 }
2154 
2155 module_init(ktime_monotonic_offset_init);
2156 
2157 struct timespec
ktime_get_monotonic_offset(void)
2159 {
2160 	return (ktime_mono_to_real_offset);
2161 }
2162 
2163 struct timespec
ktime_add_us(const struct timespec t, const uint64_t usec)
2165 {
2166 	struct timespec temp;
2167 
2168 	temp.tv_nsec = 1000 * (usec % 1000000ULL);
2169 	temp.tv_sec = usec / 1000000ULL;
2170 
2171 	return (timespec_add(t, temp));
2172 }
2173 
2174 int64_t
ktime_us_delta(const struct timespec last, const struct timespec first)
2176 {
2177 	return (ktime_to_us(ktime_sub(last, first)));
2178 }
2179 
2180 int64_t
ktime_to_us(const struct timespec t)
2182 {
2183 	struct timeval tv = ktime_to_timeval(t);
2184 
2185 	return (((int64_t)tv.tv_sec * 1000000LL) + tv.tv_usec);
2186 }
2187 
2188 int64_t
ktime_to_ms(const struct timespec t)
2190 {
2191 	return (((int64_t)t.tv_sec * 1000LL) + (t.tv_nsec / 1000000LL));
2192 }
2193 
2194 struct timespec
ktime_set(const s64 secs, const unsigned long nsecs)
2196 {
2197 	struct timespec ts = {.tv_sec = secs,.tv_nsec = nsecs};
2198 
2199 	return (ts);
2200 }
2201 
2202 int64_t
ktime_ms_delta(const ktime_t last, const ktime_t first)
2204 {
2205 	return (ktime_to_ms(ktime_sub(last, first)));
2206 }
2207 
2208 int
ktime_compare(const ktime_t a, const ktime_t b)
2210 {
2211 	if (a.tv_sec == b.tv_sec) {
2212 		if (a.tv_nsec > b.tv_nsec)
2213 			return (1);
2214 		else if (a.tv_nsec < b.tv_nsec)
2215 			return (-1);
2216 		else
2217 			return (0);
2218 	} else if (a.tv_sec > b.tv_sec)
2219 		return (1);
2220 	else if (a.tv_sec < b.tv_sec)
2221 		return (-1);
2222 	else
2223 		return (0);
2224 }
2225 
2226 u64
timeval_to_ns(const struct timeval *tv)
2228 {
2229 	return ((u64) tv->tv_sec * NSEC_PER_SEC) + ((u64) tv->tv_usec * 1000ULL);
2230 }
2231 
2232 struct timeval
ns_to_timeval(u64 nsec)
2234 {
2235 	struct timeval tv = {.tv_sec = nsec / NSEC_PER_SEC,
2236 	.tv_usec = (nsec % NSEC_PER_SEC) / 1000ULL};
2237 
2238 	return (tv);
2239 }
2240 
2241 struct timespec
current_kernel_time(void)
2243 {
2244 	struct timespec ts;
2245 
2246 	ktime_get_real_ts(&ts);
2247 	return (ts);
2248 }
2249 
2250 int64_t
timespec_to_ns(const struct timespec *ts)
2252 {
2253 	return ((ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec);
2254 }
2255 
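/*
 * ns_to_timespec() normalises negative nanosecond counts the same way the
 * kernel helper does: the remainder is kept in the range [0, NSEC_PER_SEC),
 * so for example -1 ns maps to { .tv_sec = -1, .tv_nsec = 999999999 }.
 */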
2256 struct timespec
2257 ns_to_timespec(const int64_t nsec)
2258 {
2259 	struct timespec ts;
2260 	int32_t rem;
2261 
2262 	if (nsec == 0) {
2263 		ts.tv_sec = 0;
2264 		ts.tv_nsec = 0;
2265 		return (ts);
2266 	}
2267 
2268 	ts.tv_sec = nsec / NSEC_PER_SEC;
2269 	rem = nsec % NSEC_PER_SEC;
2270 	if (rem < 0) {
2271 		ts.tv_sec--;
2272 		rem += NSEC_PER_SEC;
2273 	}
2274 	ts.tv_nsec = rem;
2275 	return (ts);
2276 }
2277 
2278 struct timespec
2279 timespec_add(struct timespec vvp, struct timespec uvp)
2280 {
2281 	vvp.tv_sec += uvp.tv_sec;
2282 	vvp.tv_nsec += uvp.tv_nsec;
2283 	if (vvp.tv_nsec >= 1000000000L) {
2284 		vvp.tv_sec++;
2285 		vvp.tv_nsec -= 1000000000L;
2286 	}
2287 	return (vvp);
2288 }
2289 
2290 struct timespec
2291 timespec_sub(struct timespec vvp, struct timespec uvp)
2292 {
2293 	vvp.tv_sec -= uvp.tv_sec;
2294 	vvp.tv_nsec -= uvp.tv_nsec;
2295 	if (vvp.tv_nsec < 0) {
2296 		vvp.tv_sec--;
2297 		vvp.tv_nsec += 1000000000L;
2298 	}
2299 	return (vvp);
2300 }
2301 
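/*
 * msleep() sleeps with the shim's global lock released: atomic_drop() is
 * expected to return how many lock levels the caller held and
 * atomic_pickup() restores that count once usleep() returns, so a sleeping
 * thread does not block the rest of the process.
 */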
2302 void
2303 msleep(uint32_t ms)
2304 {
2305 	uint32_t drops;
2306 
2307 	atomic_lock();
2308 	drops = atomic_drop();
2309 	atomic_unlock();
2310 
2311 	usleep(ms * 1000);
2312 
2313 	atomic_lock();
2314 	atomic_pickup(drops);
2315 	atomic_unlock();
2316 }
2317 
2318 void
2319 ssleep(uint32_t s)
2320 {
2321 	msleep(s * 1000);
2322 }
2323 
2324 uint32_t
2325 msleep_interruptible(uint32_t ms)
2326 {
2327 	msleep(ms);
2328 	return (0);
2329 }
2330 
2331 int
2332 request_module(const char *ptr,...)
2333 {
2334 	return (0);
2335 }
2336 
2337 int
2338 request_module_nowait(const char *ptr,...)
2339 {
2340 	return (0);
2341 }
2342 
2343 int
2344 device_can_wakeup(struct device *dev)
2345 {
2346 	return (-EINVAL);
2347 }
2348 
2349 void
2350 device_init_wakeup(struct device *dev, int flags)
2351 {
2352 }
2353 
2354 void
2355 device_initialize(struct device *dev)
2356 {
2357 }
2358 
2359 int
2360 dmi_check_system(const struct dmi_system_id *list)
2361 {
2362 	return (0);
2363 }
2364 
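/*
 * clear_user() zeroes a userspace range in 256-byte chunks by repeatedly
 * copying from a static zero buffer. It returns 0 on success and a
 * non-zero count of remaining bytes on failure, mirroring the kernel
 * semantics.
 */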
2365 unsigned long
2366 clear_user(void *to, unsigned long size)
2367 {
2368 	static const uint8_t buf[256];
2369 
2370 	uint8_t *ptr = to;
2371 
2372 	while (size > sizeof(buf)) {
2373 		if (copy_to_user(ptr, buf, sizeof(buf)))
2374 			return (size);
2375 
2376 		ptr += sizeof(buf);
2377 		size -= sizeof(buf);
2378 	}
2379 
2380 	if (size > 0)
2381 		return (copy_to_user(ptr, buf, size));
2382 
2383 	return (0);
2384 }
2385 
2386 void
2387 swab16s(uint16_t *ptr)
2388 {
2389 	*ptr = bswap16(*ptr);
2390 }
2391 
2392 uint16_t
2393 swab16(uint16_t temp)
2394 {
2395 	return (bswap16(temp));
2396 }
2397 
2398 void
2399 swab32s(uint32_t *ptr)
2400 {
2401 	*ptr = bswap32(*ptr);
2402 }
2403 
2404 uint32_t
2405 swab32(uint32_t temp)
2406 {
2407 	return (bswap32(temp));
2408 }
2409 
2410 int
2411 scnprintf(char *buf, size_t size, const char *fmt,...)
2412 {
2413 	va_list args;
2414 	int retval;
2415 
2416 	va_start(args, fmt);
2417 	retval = vsnprintf(buf, size, fmt, args);
2418 	va_end(args);
2419 
2420 	return ((size == 0) ? 0 : ((size_t)retval >= size) ? (size - 1) : retval);
2421 }
2422 
2423 #undef do_div
2424 
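/*
 * In the Linux kernel do_div() is a macro which divides a 64-bit value in
 * place by a 32-bit divisor and evaluates to the remainder. Here it is a
 * plain function taking a pointer instead, for example:
 *
 *	uint64_t ns = 1000000123ULL;
 *	uint32_t r = do_div(&ns, 1000000000U);	-> ns == 1, r == 123
 */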
2425 uint32_t
2426 do_div(uint64_t *rem, uint32_t div)
2427 {
2428 	uint64_t val = *rem;
2429 
2430 	*rem = val / div;
2431 
2432 	return (val % div);
2433 }
2434 
2435 int
2436 sysfs_create_group(struct kobject *kobj,
2437     const struct attribute_group *grp)
2438 {
2439 	return (0);
2440 }
2441 
2442 void
2443 sysfs_remove_group(struct kobject *kobj,
2444     const struct attribute_group *grp)
2445 {
2446 }
2447 
2448 int
2449 sysfs_create_bin_file(struct kobject *kobj, struct bin_attribute *attr)
2450 {
2451 	return (0);
2452 }
2453 
2454 int
2455 sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr)
2456 {
2457 	return (0);
2458 }
2459 
2460 void   *
2461 pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
2462     dma_addr_t *dma_addr)
2463 {
2464 	return (pci_alloc_consistent(hwdev, size, dma_addr));
2465 }
2466 
2467 void   *
2468 pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
2469     dma_addr_t *dma_addr)
2470 {
2471 	void *ptr;
2472 
2473 	if (dma_addr)
2474 		*dma_addr = 0;
2475 	ptr = malloc(size);
2476 	if (ptr)
2477 		memset(ptr, 0, size);
2478 	return (ptr);
2479 }
2480 
2481 void
2482 pci_free_consistent(struct pci_dev *hwdev, size_t size,
2483     void *vaddr, dma_addr_t dma_handle)
2484 {
2485 	free(vaddr);
2486 }
2487 
2488 int
2489 add_uevent_var(struct kobj_uevent_env *env, const char *format,...)
2490 {
2491 	return (0);
2492 }
2493 
2494 struct class *
2495 class_create(struct module *owner, const char *name)
2496 {
2497 	struct class *class;
2498 
2499 	class = malloc(sizeof(*class));
2500 
2501 	if (class == NULL)
2502 		return (NULL);
2503 
2504 	memset(class, 0, sizeof(*class));
2505 
2506 	class->name = name;
2507 
2508 	return (class);
2509 }
2510 
2511 int
2512 usb_register_dev(struct usb_interface *iface, struct usb_class_driver *info)
2513 {
2514 	return (0);
2515 }
2516 
2517 void
2518 usb_deregister_dev(struct usb_interface *iface, struct usb_class_driver *info)
2519 {
2520 
2521 }
2522 
2523 struct usb_interface *
2524 usb_find_interface(struct usb_driver *drv, int minor)
2525 {
2526 	return (NULL);			/* not supported */
2527 }
2528 
2529 void   *
2530 kmemdup(const void *src, size_t len, gfp_t gfp)
2531 {
2532 	void *p;
2533 
2534 	p = malloc(len);
2535 	if (p)
2536 		memcpy(p, src, len);
2537 
2538 	return (p);
2539 }
2540 
2541 void   *
2542 memdup_user(const void *src, size_t len)
2543 {
2544 	void *p;
2545 
2546 	p = malloc(len);
2547 	if (p == NULL)
2548 		return (ERR_PTR(-ENOMEM));
2549 
2550 	if (copy_from_user(p, src, len)) {
2551 		free(p);
2552 		return ERR_PTR(-EFAULT);
2553 	}
2554 	return (p);
2555 }
2556 
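/*
 * rounddown_pow_of_two() and roundup_pow_of_two() are built on __flsl()
 * (find last set bit). For example, 1000 rounds down to 512 and up to
 * 1024. The x == 0 case is handled explicitly here and yields 0.
 */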
2557 unsigned long
2558 rounddown_pow_of_two(unsigned long x)
2559 {
2560 	if (x == 0)
2561 		return (0);
2562 	else
2563 		return (1UL << (__flsl(x) - 1));
2564 }
2565 
2566 unsigned long
2567 roundup_pow_of_two(unsigned long x)
2568 {
2569 	if (x == 0)
2570 		return (0);
2571 	else
2572 		return (1UL << __flsl((x) - 1));
2573 }
2574 
2575 const char *
2576 skip_spaces(const char *str)
2577 {
2578 	while (isspace(*str))
2579 		str++;
2580 	return ((const char *)str);
2581 }
2582 
2583 uint64_t
2584 div64_u64(uint64_t rem, uint64_t div)
2585 {
2586 	return (rem / div);
2587 }
2588 
2589 int64_t
2590 div64_s64(int64_t rem, int64_t div)
2591 {
2592 	return (rem / div);
2593 }
2594 
2595 int64_t
2596 div_s64(int64_t rem, int32_t div)
2597 {
2598 	return (rem / (int64_t)div);
2599 }
2600 
2601 uint64_t
2602 div_u64(uint64_t rem, uint32_t div)
2603 {
2604 	return (rem / (uint64_t)div);
2605 }
2606 
2607 uint64_t
2608 div_u64_rem(uint64_t rem, uint32_t div, uint32_t *prem)
2609 {
2610 	*prem = rem % (uint64_t)div;
2611 	return (rem / (uint64_t)div);
2612 }
2613 
2614 int
2615 nonseekable_open(struct inode *inode, struct file *file)
2616 {
2617 	return (0);
2618 }
2619 
2620 int
2621 stream_open(struct inode *inode, struct file *file)
2622 {
2623 	return (0);
2624 }
2625 
2626 int
2627 kobject_set_name(struct kobject *kobj, const char *fmt,...)
2628 {
2629 	return (0);
2630 }
2631 
2632 int
2633 zero_nop(void)
2634 {
2635 	return (0);
2636 }
2637 
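/*
 * The kstrto*() helpers below share one pattern: parse with strtoll() or
 * strtoull(), reject trailing garbage with -EINVAL, and range-check by
 * casting to the destination width, returning -ERANGE on overflow. Note
 * that, unlike the kernel versions, base 0 (auto-detection) is rejected
 * by the "base < 2" check.
 */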
2638 int
2639 kstrtos16(const char *nptr, unsigned int base, int16_t *res)
2640 {
2641 	long long temp;
2642 	char *pp = NULL;
2643 
2644 	*res = 0;
2645 
2646 	if (base < 2 || base > 35)
2647 		return (-EINVAL);
2648 	temp = strtoll(nptr, &pp, base);
2649 	if (pp && pp[0])
2650 		return (-EINVAL);
2651 	if (temp != (long long)(int16_t)temp)
2652 		return (-ERANGE);
2653 
2654 	*res = temp;
2655 	return (0);
2656 }
2657 
2658 int
2659 kstrtou16(const char *nptr, unsigned int base, uint16_t *res)
2660 {
2661 	unsigned long long temp;
2662 	char *pp = NULL;
2663 
2664 	*res = 0;
2665 
2666 	if (base < 2 || base > 35)
2667 		return (-EINVAL);
2668 	temp = strtoull(nptr, &pp, base);
2669 	if (pp && pp[0])
2670 		return (-EINVAL);
2671 	if (temp != (unsigned long long)(uint16_t)temp)
2672 		return (-ERANGE);
2673 
2674 	*res = temp;
2675 	return (0);
2676 }
2677 
2678 int
2679 kstrtos8(const char *nptr, unsigned int base, int8_t *res)
2680 {
2681 	long long temp;
2682 	char *pp = NULL;
2683 
2684 	*res = 0;
2685 
2686 	if (base < 2 || base > 35)
2687 		return (-EINVAL);
2688 	temp = strtoll(nptr, &pp, base);
2689 	if (pp && pp[0])
2690 		return (-EINVAL);
2691 	if (temp != (long long)(int8_t)temp)
2692 		return (-ERANGE);
2693 
2694 	*res = temp;
2695 	return (0);
2696 }
2697 
2698 int
2699 kstrtou8(const char *nptr, unsigned int base, uint8_t *res)
2700 {
2701 	unsigned long long temp;
2702 	char *pp = NULL;
2703 
2704 	*res = 0;
2705 
2706 	if (base < 2 || base > 35)
2707 		return (-EINVAL);
2708 	temp = strtoull(nptr, &pp, base);
2709 	if (pp && pp[0])
2710 		return (-EINVAL);
2711 	if (temp != (unsigned long long)(uint8_t)temp)
2712 		return (-ERANGE);
2713 
2714 	*res = temp;
2715 	return (0);
2716 }
2717 
2718 int
2719 kstrtouint(const char *nptr, unsigned int base, unsigned int *res)
2720 {
2721 	unsigned long long temp;
2722 	char *pp = NULL;
2723 
2724 	*res = 0;
2725 
2726 	if (base < 2 || base > 35)
2727 		return (-EINVAL);
2728 	temp = strtoull(nptr, &pp, base);
2729 	if (pp && pp[0])
2730 		return (-EINVAL);
2731 	if (temp != (unsigned long long)(unsigned int)temp)
2732 		return (-ERANGE);
2733 
2734 	*res = temp;
2735 	return (0);
2736 }
2737 
2738 int
2739 kstrtoint(const char *nptr, unsigned int base, int *res)
2740 {
2741 	long long temp;
2742 	char *pp = NULL;
2743 
2744 	*res = 0;
2745 
2746 	if (base < 2 || base > 35)
2747 		return (-EINVAL);
2748 	temp = strtoll(nptr, &pp, base);
2749 	if (pp && pp[0])
2750 		return (-EINVAL);
2751 	if (temp != (long long)(int)temp)
2752 		return (-ERANGE);
2753 
2754 	*res = temp;
2755 	return (0);
2756 }
2757 
2758 int
2759 kstrtoul(const char *nptr, unsigned int base, unsigned long *res)
2760 {
2761 	unsigned long temp;
2762 	char *pp = NULL;
2763 
2764 	*res = 0;
2765 
2766 	if (base < 2 || base > 35)
2767 		return (-EINVAL);
2768 	temp = strtoul(nptr, &pp, base);
2769 	if (pp && pp[0])
2770 		return (-EINVAL);
2771 	*res = temp;
2772 	return (0);
2773 }
2774 
2775 int
2776 kstrtobool(const char *nptr, bool *res)
2777 {
2778 	if (nptr == NULL)
2779 		return (-EINVAL);
2780 
2781 	switch (nptr[0]) {
2782 	case 'y':
2783 	case 'Y':
2784 	case '1':
2785 		*res = true;
2786 		return (0);
2787 	case 'n':
2788 	case 'N':
2789 	case '0':
2790 		*res = false;
2791 		return (0);
2792 	case 'o':
2793 	case 'O':
2794 		switch (nptr[1]) {
2795 		case 'n':
2796 		case 'N':
2797 			*res = true;
2798 			return (0);
2799 		case 'f':
2800 		case 'F':
2801 			*res = false;
2802 			return (0);
2803 		default:
2804 			break;
2805 		}
2806 		break;
2807 	default:
2808 		break;
2809 	}
2810 	return (-EINVAL);
2811 }
2812 
2813 /* The following function was copied from the Linux Kernel sources, fs/libfs.c */
2814 
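/*
 * simple_read_from_buffer() copies at most "available - *ppos" bytes to
 * userspace, advances *ppos by the amount actually copied and returns that
 * count, or -EFAULT when nothing could be copied at all.
 */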
2815 ssize_t
2816 simple_read_from_buffer(void __user * to, size_t count, loff_t *ppos,
2817     const void *from, size_t available)
2818 {
2819 	loff_t pos = *ppos;
2820 	size_t ret;
2821 
2822 	if (pos < 0)
2823 		return (-EINVAL);
2824 	if (pos >= available || count == 0)
2825 		return (0);
2826 	if (count > available - pos)
2827 		count = available - pos;
2828 	ret = copy_to_user(to, from + pos, count);
2829 	if (ret == count)
2830 		return (-EFAULT);
2831 	count -= ret;
2832 	*ppos = pos + count;
2833 	return (count);
2834 }
2835 
2836 /* The following function was copied from the Linux Kernel sources, fs/libfs.c */
2837 
2838 ssize_t
2839 simple_write_to_buffer(void *to, size_t available,
2840     loff_t *ppos, const void __user * from, size_t count)
2841 {
2842 	loff_t pos = *ppos;
2843 	size_t ret;
2844 
2845 	if (pos < 0)
2846 		return (-EINVAL);
2847 	if (pos >= available || count == 0)
2848 		return (0);
2849 	if (count > available - pos)
2850 		count = available - pos;
2851 	ret = copy_from_user(to + pos, from, count);
2852 	if (ret == count)
2853 		return (-EFAULT);
2854 	count -= ret;
2855 	*ppos = pos + count;
2856 	return (count);
2857 }
2858 
2859 struct power_supply *
2860 power_supply_register(struct device *parent,
2861     const struct power_supply_desc *desc,
2862     const struct power_supply_config *cfg)
2863 {
2864 	return (NULL);
2865 }
2866 
2867 struct power_supply *
2868 devm_power_supply_register(struct device *parent,
2869     const struct power_supply_desc *desc,
2870     const struct power_supply_config *cfg)
2871 {
2872 	return (NULL);
2873 }
2874 
2875 void
2876 power_supply_unregister(struct power_supply *psy)
2877 {
2878 
2879 }
2880 
2881 int
2882 power_supply_powers(struct power_supply *psy, struct device *dev)
2883 {
2884 	return (0);
2885 }
2886 
2887 void
2888 power_supply_changed(struct power_supply *psy)
2889 {
2890 }
2891 
2892 void   *
2893 power_supply_get_drvdata(struct power_supply *psy)
2894 {
2895 	return (NULL);
2896 }
2897 
2898 int
2899 led_classdev_register_ext(struct device *parent,
2900 			  struct led_classdev *led_cdev,
2901 			  struct led_init_data *init_data)
2902 {
2903 	return (0);
2904 }
2905 
2906 int
2907 devm_led_classdev_register_ext(struct device *parent,
2908 			       struct led_classdev *led_cdev,
2909 			       struct led_init_data *init_data)
2910 {
2911 	return (0);
2912 }
2913 
2914 int
2915 devm_of_led_classdev_register(struct device *parent, struct device_node *node, struct led_classdev *led_cdev)
2916 {
2917 	return (0);
2918 }
2919 
2920 int
2921 of_led_classdev_register(struct device *parent, struct device_node *node, struct led_classdev *led_cdev)
2922 {
2923 	return (0);
2924 }
2925 
2926 void
2927 led_classdev_unregister(struct led_classdev *led_cdev)
2928 {
2929 
2930 }
2931 
2932 void
2933 led_classdev_suspend(struct led_classdev *led_cdev)
2934 {
2935 
2936 }
2937 
2938 void
2939 led_classdev_resume(struct led_classdev *led_cdev)
2940 {
2941 
2942 }
2943 
2944 /* "int_sqrt" was copied from HPS's libmbin */
2945 
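/*
 * This is a bit-by-bit (digit recurrence) integer square root: each of the
 * 32 unrolled steps decides one result bit by trial subtraction, and the
 * final value of "b" is the floor of the square root. For example,
 * int_sqrt(1000000) returns 1000.
 */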
2946 uint64_t
2947 int_sqrt(uint64_t a)
2948 {
2949 	uint64_t b = 0x4000000000000000ULL;
2950 
2951 	if (a >= b) {
2952 		a -= b;
2953 		b >>= 1;
2954 		b ^= 0x7000000000000000ULL;
2955 	} else {
2956 		b >>= 1;
2957 		b ^= 0x3000000000000000ULL;
2958 	}
2959 	if (a >= b) {
2960 		a -= b;
2961 		b >>= 1;
2962 		b ^= 0x1c00000000000000ULL;
2963 	} else {
2964 		b >>= 1;
2965 		b ^= 0xc00000000000000ULL;
2966 	}
2967 	if (a >= b) {
2968 		a -= b;
2969 		b >>= 1;
2970 		b ^= 0x700000000000000ULL;
2971 	} else {
2972 		b >>= 1;
2973 		b ^= 0x300000000000000ULL;
2974 	}
2975 	if (a >= b) {
2976 		a -= b;
2977 		b >>= 1;
2978 		b ^= 0x1c0000000000000ULL;
2979 	} else {
2980 		b >>= 1;
2981 		b ^= 0xc0000000000000ULL;
2982 	}
2983 	if (a >= b) {
2984 		a -= b;
2985 		b >>= 1;
2986 		b ^= 0x70000000000000ULL;
2987 	} else {
2988 		b >>= 1;
2989 		b ^= 0x30000000000000ULL;
2990 	}
2991 	if (a >= b) {
2992 		a -= b;
2993 		b >>= 1;
2994 		b ^= 0x1c000000000000ULL;
2995 	} else {
2996 		b >>= 1;
2997 		b ^= 0xc000000000000ULL;
2998 	}
2999 	if (a >= b) {
3000 		a -= b;
3001 		b >>= 1;
3002 		b ^= 0x7000000000000ULL;
3003 	} else {
3004 		b >>= 1;
3005 		b ^= 0x3000000000000ULL;
3006 	}
3007 	if (a >= b) {
3008 		a -= b;
3009 		b >>= 1;
3010 		b ^= 0x1c00000000000ULL;
3011 	} else {
3012 		b >>= 1;
3013 		b ^= 0xc00000000000ULL;
3014 	}
3015 	if (a >= b) {
3016 		a -= b;
3017 		b >>= 1;
3018 		b ^= 0x700000000000ULL;
3019 	} else {
3020 		b >>= 1;
3021 		b ^= 0x300000000000ULL;
3022 	}
3023 	if (a >= b) {
3024 		a -= b;
3025 		b >>= 1;
3026 		b ^= 0x1c0000000000ULL;
3027 	} else {
3028 		b >>= 1;
3029 		b ^= 0xc0000000000ULL;
3030 	}
3031 	if (a >= b) {
3032 		a -= b;
3033 		b >>= 1;
3034 		b ^= 0x70000000000ULL;
3035 	} else {
3036 		b >>= 1;
3037 		b ^= 0x30000000000ULL;
3038 	}
3039 	if (a >= b) {
3040 		a -= b;
3041 		b >>= 1;
3042 		b ^= 0x1c000000000ULL;
3043 	} else {
3044 		b >>= 1;
3045 		b ^= 0xc000000000ULL;
3046 	}
3047 	if (a >= b) {
3048 		a -= b;
3049 		b >>= 1;
3050 		b ^= 0x7000000000ULL;
3051 	} else {
3052 		b >>= 1;
3053 		b ^= 0x3000000000ULL;
3054 	}
3055 	if (a >= b) {
3056 		a -= b;
3057 		b >>= 1;
3058 		b ^= 0x1c00000000ULL;
3059 	} else {
3060 		b >>= 1;
3061 		b ^= 0xc00000000ULL;
3062 	}
3063 	if (a >= b) {
3064 		a -= b;
3065 		b >>= 1;
3066 		b ^= 0x700000000ULL;
3067 	} else {
3068 		b >>= 1;
3069 		b ^= 0x300000000ULL;
3070 	}
3071 	if (a >= b) {
3072 		a -= b;
3073 		b >>= 1;
3074 		b ^= 0x1c0000000ULL;
3075 	} else {
3076 		b >>= 1;
3077 		b ^= 0xc0000000ULL;
3078 	}
3079 	if (a >= b) {
3080 		a -= b;
3081 		b >>= 1;
3082 		b ^= 0x70000000ULL;
3083 	} else {
3084 		b >>= 1;
3085 		b ^= 0x30000000ULL;
3086 	}
3087 	if (a >= b) {
3088 		a -= b;
3089 		b >>= 1;
3090 		b ^= 0x1c000000ULL;
3091 	} else {
3092 		b >>= 1;
3093 		b ^= 0xc000000ULL;
3094 	}
3095 	if (a >= b) {
3096 		a -= b;
3097 		b >>= 1;
3098 		b ^= 0x7000000ULL;
3099 	} else {
3100 		b >>= 1;
3101 		b ^= 0x3000000ULL;
3102 	}
3103 	if (a >= b) {
3104 		a -= b;
3105 		b >>= 1;
3106 		b ^= 0x1c00000ULL;
3107 	} else {
3108 		b >>= 1;
3109 		b ^= 0xc00000ULL;
3110 	}
3111 	if (a >= b) {
3112 		a -= b;
3113 		b >>= 1;
3114 		b ^= 0x700000ULL;
3115 	} else {
3116 		b >>= 1;
3117 		b ^= 0x300000ULL;
3118 	}
3119 	if (a >= b) {
3120 		a -= b;
3121 		b >>= 1;
3122 		b ^= 0x1c0000ULL;
3123 	} else {
3124 		b >>= 1;
3125 		b ^= 0xc0000ULL;
3126 	}
3127 	if (a >= b) {
3128 		a -= b;
3129 		b >>= 1;
3130 		b ^= 0x70000ULL;
3131 	} else {
3132 		b >>= 1;
3133 		b ^= 0x30000ULL;
3134 	}
3135 	if (a >= b) {
3136 		a -= b;
3137 		b >>= 1;
3138 		b ^= 0x1c000ULL;
3139 	} else {
3140 		b >>= 1;
3141 		b ^= 0xc000ULL;
3142 	}
3143 	if (a >= b) {
3144 		a -= b;
3145 		b >>= 1;
3146 		b ^= 0x7000ULL;
3147 	} else {
3148 		b >>= 1;
3149 		b ^= 0x3000ULL;
3150 	}
3151 	if (a >= b) {
3152 		a -= b;
3153 		b >>= 1;
3154 		b ^= 0x1c00ULL;
3155 	} else {
3156 		b >>= 1;
3157 		b ^= 0xc00ULL;
3158 	}
3159 	if (a >= b) {
3160 		a -= b;
3161 		b >>= 1;
3162 		b ^= 0x700ULL;
3163 	} else {
3164 		b >>= 1;
3165 		b ^= 0x300ULL;
3166 	}
3167 	if (a >= b) {
3168 		a -= b;
3169 		b >>= 1;
3170 		b ^= 0x1c0ULL;
3171 	} else {
3172 		b >>= 1;
3173 		b ^= 0xc0ULL;
3174 	}
3175 	if (a >= b) {
3176 		a -= b;
3177 		b >>= 1;
3178 		b ^= 0x70ULL;
3179 	} else {
3180 		b >>= 1;
3181 		b ^= 0x30ULL;
3182 	}
3183 	if (a >= b) {
3184 		a -= b;
3185 		b >>= 1;
3186 		b ^= 0x1cULL;
3187 	} else {
3188 		b >>= 1;
3189 		b ^= 0xcULL;
3190 	}
3191 	if (a >= b) {
3192 		a -= b;
3193 		b >>= 1;
3194 		b ^= 0x7ULL;
3195 	} else {
3196 		b >>= 1;
3197 		b ^= 0x3ULL;
3198 	}
3199 	if (a >= b) {
3200 		a -= b;
3201 		b >>= 1;
3202 		b ^= 0x1ULL;
3203 	} else {
3204 		b >>= 1;
3205 	}
3206 	return (b);
3207 }
3208 
3209 void   *
3210 devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
3211 {
3212 	void *ptr;
3213 
3214 	ptr = malloc(size);
3215 	if (ptr != NULL)
3216 		memset(ptr, 0, size);
3217 	return (ptr);
3218 }
3219 
3220 void
3221 devres_free(void *res)
3222 {
3223 	free(res);
3224 }
3225 
3226 void
3227 devres_add(struct device *dev, void *res)
3228 {
3229 	/* NOP */
3230 }
3231 
3232 int
3233 devres_destroy(struct device *dev, dr_release_t release,
3234     dr_match_t match, void *match_data)
3235 {
3236 	printf("TODO: Implement devres_destroy()\n");
3237 	return (0);
3238 }
3239 
3240 void   *
3241 devres_open_group(struct device *dev, void *id, gfp_t gfp)
3242 {
3243 	if (id != NULL)
3244 		return (id);
3245 	else
3246 		return (dev);
3247 }
3248 
3249 void
3250 devres_close_group(struct device *dev, void *id)
3251 {
3252 }
3253 
3254 int
3255 devres_release_group(struct device *dev, void *id)
3256 {
3257 	return (0);
3258 }
3259 
3260 int
3261 dma_buf_fd(struct dma_buf *dmabuf, int flags)
3262 {
3263 	return (-1);
3264 }
3265 
3266 struct dma_buf *
3267 dma_buf_get(int fd)
3268 {
3269 	return (NULL);
3270 }
3271 
3272 void
3273 dma_buf_put(struct dma_buf *dmabuf)
3274 {
3275 
3276 }
3277 
3278 int
3279 dma_buf_vmap(struct dma_buf *a, struct dma_buf_map *b)
3280 {
3281 	return (-EOPNOTSUPP);
3282 }
3283 
3284 void
3285 dma_buf_vunmap(struct dma_buf *a, struct dma_buf_map *b)
3286 {
3287 }
3288 
3289 uint32_t
3290 ror32(uint32_t x, uint8_t n)
3291 {
3292 	n &= 0x1f;
3293 	if (n == 0)
3294 		return (x);
3295 	return ((x >> n) | (x << (32 - n)));
3296 }
3297 
3298 unsigned long
3299 gcd(unsigned long a, unsigned long b)
3300 {
3301 	unsigned long r;
3302 
3303 	if (a < b)
3304 		swap(a, b);
3305 	if (!b)
3306 		return (a);
3307 	while ((r = (a % b)) != 0) {
3308 		a = b;
3309 		b = r;
3310 	}
3311 	return (b);
3312 }
3313 
3314 void
3315 get_random_bytes(void *buf, int nbytes)
3316 {
3317 	while (nbytes--)
3318 		*((char *)buf + nbytes) = rand();
3319 }
3320 
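/*
 * prandom_u32_max() maps a pseudo-random 32-bit value into [0, max) using
 * a multiply-and-shift. Two rand() calls are combined below because a
 * single rand() is only guaranteed to provide 31 bits or fewer, which
 * would bias the result towards the lower half of the range.
 */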
3321 u32
3322 prandom_u32_max(u32 max)
3323 {
3324 	return ((u32)(((u64)(((u32)rand() << 16) ^ (u32)rand()) * max) >> 32));
3325 }
3326 
3327 const char *
3328 dev_driver_string(const struct device *dev)
3329 {
3330 	struct device_driver *drv;
3331 
3332 	drv = dev->driver;
3333 	return (drv ? drv->name :
3334 	    (dev->bus ? dev->bus->name :
3335 	    (dev->class ? dev->class->name : "")));
3336 }
3337 
3338 s32
3339 sign_extend32(u32 value, int index)
3340 {
3341 	u8 shift = 31 - index;
3342 
3343 	return ((s32) (value << shift) >> shift);
3344 }
3345 
3346 char   *
3347 devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt,...)
3348 {
3349 	va_list ap;
3350 	char *ptr = NULL;
3351 
3352 	va_start(ap, fmt);
3353 	vasprintf(&ptr, fmt, ap);
3354 	va_end(ap);
3355 	return (ptr);
3356 }
3357 
3358 void
3359 eth_zero_addr(u8 * addr)
3360 {
3361 	memset(addr, 0x00, 6);
3362 }
3363 
3364 struct device *
3365 kobj_to_dev(struct kobject *kobj)
3366 {
3367 	return (container_of(kobj, struct device, kobj));
3368 }
3369 
3370 void   *
3371 memscan(void *data, int c, size_t sz)
3372 {
3373 	uint8_t *p;
3374 
3375 	p = data;
3376 	while (sz != 0 && *p != c) {
3377 		p++;
3378 		sz--;
3379 	}
3380 	return (p);
3381 }
3382 
3383 int
3384 refcount_read(refcount_t *r)
3385 {
3386 	return (atomic_read(&r->refs));
3387 }
3388 
3389 bool
3390 refcount_dec_and_test(refcount_t *r)
3391 {
3392 	return (atomic_dec_and_test(&r->refs));
3393 }
3394 
3395 void
3396 refcount_set(refcount_t *r, int i)
3397 {
3398 	atomic_set(&r->refs, i);
3399 }
3400 
3401 void
3402 refcount_inc(refcount_t *r)
3403 {
3404 	atomic_inc(&r->refs);
3405 }
3406 
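/*
 * rational_best_approximation() walks the continued fraction expansion of
 * numerator/denominator (one Euclidean step per iteration) and returns the
 * last convergent whose numerator and denominator still fit the given
 * limits. For example, 31416/10000 with max_denominator 100 comes out as
 * 22/7.
 */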
3407 void
3408 rational_best_approximation(
3409     unsigned long given_numerator, unsigned long given_denominator,
3410     unsigned long max_numerator, unsigned long max_denominator,
3411     unsigned long *best_numerator, unsigned long *best_denominator)
3412 {
3413 	unsigned long n, d, n0, d0, n1, d1;
3414 
3415 	n = given_numerator;
3416 	d = given_denominator;
3417 	n0 = d1 = 0;
3418 	n1 = d0 = 1;
3419 	for (;;) {
3420 		unsigned long t, a;
3421 
3422 		if ((n1 > max_numerator) || (d1 > max_denominator)) {
3423 			n1 = n0;
3424 			d1 = d0;
3425 			break;
3426 		}
3427 		if (d == 0)
3428 			break;
3429 		t = d;
3430 		a = n / d;
3431 		d = n % d;
3432 		n = t;
3433 		t = n0 + a * n1;
3434 		n0 = n1;
3435 		n1 = t;
3436 		t = d0 + a * d1;
3437 		d0 = d1;
3438 		d1 = t;
3439 	}
3440 	*best_numerator = n1;
3441 	*best_denominator = d1;
3442 }
3443 
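/*
 * array_size(), array3_size() and struct_size_sub() mirror the kernel's
 * overflow helpers: any multiplication that overflows (or exceeds
 * SSIZE_MAX) saturates to SIZE_MAX, so a following malloc() of the result
 * is expected to fail rather than silently allocate a short buffer.
 */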
3444 size_t
3445 array_size(size_t a, size_t b)
3446 {
3447 	volatile size_t temp;
3448 
3449 	if (a == 0 || b == 0)
3450 		return (0);
3451 
3452 	temp = a * b;
3453 
3454 	if ((temp / b) != a)
3455 		return (SIZE_MAX);
3456 
3457 	if (temp > SSIZE_MAX)
3458 		return (SIZE_MAX);
3459 	else
3460 		return (temp);
3461 }
3462 
3463 size_t
3464 array3_size(size_t a, size_t b, size_t c)
3465 {
3466 	volatile size_t temp;
3467 
3468 	if (a == 0 || b == 0 || c == 0)
3469 		return (0);
3470 
3471 	temp = a * b;
3472 
3473 	if ((temp / b) != a)
3474 		return (SIZE_MAX);
3475 
3476 	a = temp;
3477 	temp = a * c;
3478 
3479 	if ((temp / c) != a)
3480 		return (SIZE_MAX);
3481 
3482 	if (temp > SSIZE_MAX)
3483 		return (SIZE_MAX);
3484 	else
3485 		return (temp);
3486 }
3487 
3488 size_t
3489 struct_size_sub(size_t n, size_t m_size, size_t b_size)
3490 {
3491 
3492 	if (n > SSIZE_MAX || m_size > SSIZE_MAX || b_size > SSIZE_MAX)
3493 		return (SIZE_MAX);
3494 
3495 	if (n != 0) {
3496 		volatile size_t temp;
3497 
3498 		temp = (n * m_size);
3499 
3500 		if ((temp / n) != m_size)
3501 			return (SIZE_MAX);
3502 		if (temp > SSIZE_MAX)
3503 			return (SIZE_MAX);
3504 
3505 		temp += b_size;
3506 		if (temp > SSIZE_MAX)
3507 			return (SIZE_MAX);
3508 
3509 		return (temp);
3510 	} else {
3511 		return (b_size);
3512 	}
3513 }
3514 
3515 ssize_t
3516 strscpy(char *dst, const char *src, size_t size)
3517 {
3518 	size_t retval = strlcpy(dst, src, size);
3519 
3520 	if (retval >= size)
3521 		return (-E2BIG);
3522 	else
3523 		return (retval);
3524 }
3525 
3526 ssize_t
3527 memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
3528     const void *from, size_t available)
3529 {
3530 	loff_t pos = *ppos;
3531 
3532 	if (pos < 0)
3533 		return (-EINVAL);
3534 	if (pos >= available)
3535 		return (0);
3536 	if (count > available - pos)
3537 		count = available - pos;
3538 	memcpy(to, from + pos, count);
3539 	*ppos = pos + count;
3540 
3541 	return (count);
3542 }
3543 
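/*
 * The "noncontiguous" DMA helpers below are emulated in userspace: the
 * allocation is a single malloc()ed buffer wrapped in a one-entry sg_table,
 * with the pointer stored in dma_address, and dma_vmap_noncontiguous()
 * simply hands that pointer back.
 */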
3544 struct sg_table *
3545 dma_alloc_noncontiguous(struct device *dev, size_t len, enum dma_data_direction dir, gfp_t gfp, unsigned long attr)
3546 {
3547 	struct sg_table *sgt = malloc(sizeof(*sgt));
3548 
3549 	if (sgt == NULL)
3550 		return (NULL);
3551 
3552 	memset(sgt, 0, sizeof(*sgt));
3553 
3554 	sgt->sgl = sgt->dummy;
3555 	sgt->nents = 1;
3556 	sgt->orig_nents = 1;
3557 
3558 	sgt->sgl->length = len;
3559 	sgt->sgl->dma_length = len;
3560 	sgt->sgl->dma_address = (long) malloc(len);
3561 	if (sgt->sgl->dma_address == 0) {
3562 		free(sgt);
3563 		return (NULL);
3564 	}
3565 	return (sgt);
3566 }
3567 
3568 void
3569 dma_free_noncontiguous(struct device *dev, size_t len, struct sg_table *sgt, enum dma_data_direction dir)
3570 {
3571 	if (sgt == NULL)
3572 		return;
3573 	free((void *)(long)sgt->sgl->dma_address);
3574 	free(sgt);
3575 }
3576 
3577 void *
3578 dma_vmap_noncontiguous(struct device *dev, size_t len, struct sg_table *sgt)
3579 {
3580 	return ((void *)(long)sgt->sgl->dma_address);
3581 }
3582 
3583