#ifndef _LINUX_COMPAT_H_
#define _LINUX_COMPAT_H_

#include <console.h>
#include <log.h>
#include <malloc.h>

#include <asm/processor.h>

#include <linux/types.h>
#include <linux/err.h>
#include <linux/kernel.h>

#ifdef CONFIG_XEN
#include <xen/events.h>
#endif

struct unused {};
typedef struct unused unused_t;

struct p_current {
	int pid;
};

extern struct p_current *current;

#define GFP_ATOMIC ((gfp_t) 0)
#define GFP_KERNEL ((gfp_t) 0)
#define GFP_NOFS ((gfp_t) 0)
#define GFP_USER ((gfp_t) 0)
#define __GFP_NOWARN ((gfp_t) 0)
#define __GFP_ZERO	((__force gfp_t)0x8000u)	/* Return zeroed page on success */
void *kmalloc(size_t size, int flags);

static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	/* Guard against multiplication overflow before allocating */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags | __GFP_ZERO);
}

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
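
/*
 * Usage sketch (hypothetical driver code, for illustration only): typical
 * use of the allocation wrappers above. 'struct foo' and 'count' are made-up
 * names; error handling follows the usual kernel style.
 *
 *	struct foo *one = kzalloc(sizeof(*one), GFP_KERNEL);
 *	struct foo *many = kcalloc(count, sizeof(*many), GFP_KERNEL);
 *
 *	if (!one || !many)
 *		return -ENOMEM;
 *	...
 *	kfree(many);
 *	kfree(one);
 *
 * Note that kmalloc_array() returns NULL when n * size would overflow
 * SIZE_MAX, so callers only need the usual NULL check.
 */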

#define vmalloc(size)	kmalloc(size, 0)
#define __vmalloc(size, flags, pgsz)	kmalloc(size, flags)
static inline void *vzalloc(unsigned long size)
{
	return kzalloc(size, 0);
}
static inline void kfree(const void *block)
{
	free((void *)block);
}
static inline void vfree(const void *addr)
{
	free((void *)addr);
}

struct kmem_cache { int sz; };

struct kmem_cache *get_mem(int element_sz);
#define kmem_cache_create(a, sz, c, d, e)	get_mem(sz)
void *kmem_cache_alloc(struct kmem_cache *obj, int flag);
static inline void kmem_cache_free(struct kmem_cache *cachep, void *obj)
{
	free(obj);
}
static inline void kmem_cache_destroy(struct kmem_cache *cachep)
{
	free(cachep);
}
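
/*
 * Usage sketch (illustrative only): the kmem_cache shims above simply map
 * slab caches onto malloc()/free(). 'struct node' and the cache name are
 * hypothetical.
 *
 *	struct kmem_cache *cache;
 *	struct node *n;
 *
 *	cache = kmem_cache_create("nodes", sizeof(struct node), 0, 0, NULL);
 *	n = kmem_cache_alloc(cache, GFP_KERNEL);
 *	if (n)
 *		kmem_cache_free(cache, n);
 *	kmem_cache_destroy(cache);
 */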

#define DECLARE_WAITQUEUE(...)	do { } while (0)
#define add_wait_queue(...)	do { } while (0)
#define remove_wait_queue(...)	do { } while (0)

#ifndef CONFIG_XEN
#define eventchn_poll()
#endif

#define __wait_event_timeout(condition, timeout, ret)		\
({								\
	ulong __ret = ret; /* explicit shadow */		\
	ulong start = get_timer(0);				\
	for (;;) {						\
		eventchn_poll();				\
		if (condition) {				\
			__ret = 1;				\
			break;					\
		}						\
		if ((get_timer(start) > timeout) || ctrlc()) {	\
			__ret = 0;				\
			break;					\
		}						\
		cpu_relax();					\
	}							\
	__ret;							\
})

/**
 * wait_event_timeout() - Wait until the event occurs or the timeout elapses.
 * @wq_head: The wait queue to wait on (unused by this compatibility layer).
 * @condition: Expression for the event to wait for.
 * @timeout: Maximum waiting time, measured with get_timer().
 *
 * Wait until @condition evaluates to %true (success) or the @timeout
 * elapses (failure).
 *
 * Return:
 * 0 - if the @condition evaluated to %false after the @timeout elapsed
 * 1 - if the @condition evaluated to %true
 */
#define wait_event_timeout(wq_head, condition, timeout)			\
({									\
	ulong __ret;							\
	if (condition)							\
		__ret = 1;						\
	else								\
		__ret = __wait_event_timeout(condition, timeout, __ret);\
	__ret;								\
})
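
/*
 * Usage sketch (illustrative only): poll a hypothetical status register
 * until a READY bit appears, giving up after 100 ms. 'regs' and
 * 'STATUS_READY' are made-up names; the wait queue argument is ignored by
 * this compatibility layer.
 *
 *	wait_queue_head_t wq;
 *
 *	init_waitqueue_head(&wq);
 *	if (!wait_event_timeout(wq, readl(&regs->status) & STATUS_READY, 100))
 *		return -ETIMEDOUT;
 */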

#define KERNEL_VERSION(a,b,c)	(((a) << 16) + ((b) << 8) + (c))

/* This is also defined in ARMv8's mmu.h */
#ifndef PAGE_SIZE
#define PAGE_SIZE	4096
#endif

/* drivers/char/random.c */
#define get_random_bytes(...)

/* include/linux/leds.h */
struct led_trigger {};

#define DEFINE_LED_TRIGGER(x)		static struct led_trigger *x;
enum led_brightness {
	LED_OFF		= 0,
	LED_HALF	= 127,
	LED_FULL	= 255,
};

static inline void led_trigger_register_simple(const char *name,
					struct led_trigger **trigger) {}
static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
					enum led_brightness event) {}
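
/*
 * Usage sketch (illustrative only): the LED trigger API above is stubbed
 * out, so driver code written against it compiles but does nothing.
 * 'mmc_led' and the "mmc" trigger name are hypothetical.
 *
 *	DEFINE_LED_TRIGGER(mmc_led)
 *
 *	led_trigger_register_simple("mmc", &mmc_led);
 *	led_trigger_event(mmc_led, LED_FULL);
 *	led_trigger_unregister_simple(mmc_led);
 */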

/* uapi/linux/limits.h */
#define XATTR_LIST_MAX 65536	/* size of extended attribute namelist (64k) */

/**
 * The type used for indexing onto a disc or disc partition.
 *
 * Linux always considers sectors to be 512 bytes long, independent of
 * the device's real block size.
 *
 * blkcnt_t is the type of the inode's block count.
 */
#ifdef CONFIG_LBDAF
typedef u64 sector_t;
typedef u64 blkcnt_t;
#else
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
#endif
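
/*
 * Worked example (illustrative only): because sectors are always 512 bytes
 * here, converting a sector number to a byte offset is a shift by 9,
 * regardless of the underlying device's real block size.
 *
 *	sector_t sec = 42;
 *	loff_t offset = (loff_t)sec << 9;	(* 42 * 512 = 21504 bytes *)
 */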

/* module */
#define THIS_MODULE		0
#define try_module_get(...)	1
#define module_put(...)		do { } while (0)
#define module_init(...)
#define module_exit(...)
#define EXPORT_SYMBOL(...)
#define EXPORT_SYMBOL_GPL(...)
#define module_param(...)
#define module_param_call(...)
#define MODULE_PARM_DESC(...)
#define MODULE_VERSION(...)
#define MODULE_DESCRIPTION(...)
#define MODULE_AUTHOR(...)
#define MODULE_LICENSE(...)
#define MODULE_ALIAS(...)
#define __module_get(...)

/* character device */
#define MKDEV(...)			0
#define MAJOR(dev)			0
#define MINOR(dev)			0

#define alloc_chrdev_region(...)	0
#define unregister_chrdev_region(...)

#define class_create(...)		__builtin_return_address(0)
#define class_create_file(...)		0
#define class_register(...)		0
#define class_unregister(...)
#define class_remove_file(...)
#define class_destroy(...)
#define misc_register(...)		0
#define misc_deregister(...)

#define blocking_notifier_call_chain(...) 0

#define __initdata
#define late_initcall(...)

#define dev_set_name(...)		do { } while (0)
#define device_register(...)		0
#define device_unregister(...)
#define volume_sysfs_init(...)		0
#define volume_sysfs_close(...)		do { } while (0)

#define init_waitqueue_head(...)	do { } while (0)
#define wait_event_interruptible(...)	0
#define wake_up_interruptible(...)	do { } while (0)
#define dump_stack(...)			do { } while (0)

#define task_pid_nr(x)			0
#define set_freezable(...)		do { } while (0)
#define try_to_freeze(...)		0
#define set_current_state(...)		do { } while (0)
#define kthread_should_stop(...)	0
#define schedule()			do { } while (0)

#define setup_timer(timer, func, data) do {} while (0)
#define del_timer_sync(timer) do {} while (0)
#define schedule_work(work) do {} while (0)
#define INIT_WORK(work, fun) do {} while (0)

struct work_struct {};

unsigned long copy_from_user(void *dest, const void *src,
			     unsigned long count);

typedef unused_t spinlock_t;
typedef int	wait_queue_head_t;

#define spin_lock_init(lock) do {} while (0)
#define spin_lock(lock) do {} while (0)
#define spin_unlock(lock) do {} while (0)
#define spin_lock_irqsave(lock, flags) do {} while (0)
#define spin_unlock_irqrestore(lock, flags) do { flags = 0; } while (0)

#define DEFINE_MUTEX(...)
#define mutex_init(...)
#define mutex_lock(...)
#define mutex_unlock(...)

#define init_rwsem(...)			do { } while (0)
#define down_read(...)			do { } while (0)
#define down_write(...)			do { } while (0)
#define down_write_trylock(...)		1
#define up_read(...)			do { } while (0)
#define up_write(...)			do { } while (0)

#define cond_resched()			do { } while (0)
#define yield()				do { } while (0)

#define __init
#define __exit
#define __devinit
#define __devinitdata
#define __devinitconst

#define kthread_create(...)	__builtin_return_address(0)
#define kthread_stop(...)	do { } while (0)
#define wake_up_process(...)	do { } while (0)

struct rw_semaphore { int i; };
struct device {
	struct device		*parent;
	struct class		*class;
	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
	void	(*release)(struct device *dev);
	/* This is used from drivers/usb/musb-new subsystem only */
	void		*driver_data;	/* data private to the driver */
	void		*device_data;	/* data private to the device */
};
struct mutex { int i; };
struct kernel_param { int i; };

struct cdev {
	int owner;
	dev_t dev;
};
#define cdev_init(...)		do { } while (0)
#define cdev_add(...)		0
#define cdev_del(...)		do { } while (0)

#define prandom_u32(...)	0

typedef struct {
	uid_t val;
} kuid_t;

typedef struct {
	gid_t val;
} kgid_t;

/* from include/linux/types.h */

/**
 * struct callback_head - callback structure for use with RCU and task_work
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *head);
};
#define rcu_head callback_head
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/* from include/linux/writeback.h */
/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange.  The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
};
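
/*
 * Usage sketch (illustrative only): as the comment above notes, a
 * writeback_control always lives on the stack with unspecified fields
 * zeroed, e.g. when requesting a full synchronous writeback:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode = WB_SYNC_ALL,
 *		.nr_to_write = LONG_MAX,
 *	};
 */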

void *kmemdup(const void *src, size_t len, gfp_t gfp);

typedef int irqreturn_t;

struct timer_list {};
struct notifier_block {};

typedef unsigned long dmaaddr_t;

#define pm_runtime_get_sync(dev) do {} while (0)
#define pm_runtime_put(dev) do {} while (0)
#define pm_runtime_put_sync(dev) do {} while (0)
#define pm_runtime_use_autosuspend(dev) do {} while (0)
#define pm_runtime_set_autosuspend_delay(dev, delay) do {} while (0)
#define pm_runtime_enable(dev) do {} while (0)

#define IRQ_NONE 0
#define IRQ_HANDLED 1
#define IRQ_WAKE_THREAD 2

#define dev_set_drvdata(dev, data) do {} while (0)

#define enable_irq(...)
#define disable_irq(...)
#define disable_irq_wake(irq) do {} while (0)
#define enable_irq_wake(irq) -EINVAL
#define free_irq(irq, data) do {} while (0)
#define request_irq(nr, f, flags, nm, data) 0

#endif