/*
 * Copyright (c) 2015-2020 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26
27 #ifndef _ASM_PAGE_H_
28 #define _ASM_PAGE_H_
29
30 struct page;
31
32 #include <vm/vm_page.h>
33
34 static inline vm_paddr_t
page_to_phys(struct page * page)35 page_to_phys(struct page *page)
36 {
37 struct vm_page *p = (struct vm_page *)page;
38
39 return VM_PAGE_TO_PHYS(p);
40 }
41
/*
 * NOTE(review): BSD's PAGE_MASK is the low offset bits (PAGE_SIZE - 1),
 * whereas Linux's PAGE_MASK is the complement that clears those bits --
 * hence the ~ here.  Confirm against <machine/param.h>.
 */
#define LINUX_PAGE_MASK (~PAGE_MASK)
43
44 static inline struct page *
virt_to_page(void * kaddr)45 virt_to_page(void *kaddr)
46 {
47 return (struct page *)PHYS_TO_VM_PAGE(vtophys(kaddr));
48 }
49
50 #include <asm/memory_model.h>
51
/* Linux page-protection attribute type, carried here as a plain integer. */
typedef unsigned long pgprot_t;
53
/*
 * Macro form of virt_to_page(); from this point on it shadows the inline
 * function defined earlier in this header.  The expansion is fully
 * parenthesized so the (struct page *) cast binds to the whole result in
 * any surrounding expression.
 */
#define virt_to_page(kaddr) ((struct page *)PHYS_TO_VM_PAGE(vtophys(kaddr)))
55
56 #endif /* _ASM_PAGE_H_ */
57