xref: /linux/drivers/gpu/drm/etnaviv/etnaviv_iommu.c (revision 44f57d78)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2014-2018 Etnaviv Project
4  */
5 
6 #include <linux/platform_device.h>
7 #include <linux/sizes.h>
8 #include <linux/slab.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/bitops.h>
11 
12 #include "etnaviv_gpu.h"
13 #include "etnaviv_mmu.h"
14 #include "etnaviv_iommu.h"
15 #include "state_hi.xml.h"
16 
/* A 2M flat table of 32-bit PTEs = 512K entries, each mapping a 4K page (2G). */
#define PT_SIZE		SZ_2M
#define PT_ENTRIES	(PT_SIZE / sizeof(u32))

/* Base of the GPU-visible address window covered by this page table. */
#define GPU_MEM_START	0x80000000
21 
/* MMUv1 domain: generic domain plus one flat, physically contiguous page table. */
struct etnaviv_iommuv1_domain {
	struct etnaviv_iommu_domain base;
	u32 *pgtable_cpu;	/* CPU-side mapping of the page table */
	dma_addr_t pgtable_dma;	/* DMA address programmed into the MC registers */
};
27 
/* Downcast a generic etnaviv IOMMU domain to its MMUv1 container. */
static struct etnaviv_iommuv1_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv1_domain, base);
}
33 
34 static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
35 {
36 	u32 *p;
37 	int i;
38 
39 	etnaviv_domain->base.bad_page_cpu =
40 			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
41 				     &etnaviv_domain->base.bad_page_dma,
42 				     GFP_KERNEL);
43 	if (!etnaviv_domain->base.bad_page_cpu)
44 		return -ENOMEM;
45 
46 	p = etnaviv_domain->base.bad_page_cpu;
47 	for (i = 0; i < SZ_4K / 4; i++)
48 		*p++ = 0xdead55aa;
49 
50 	etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
51 						   PT_SIZE,
52 						   &etnaviv_domain->pgtable_dma,
53 						   GFP_KERNEL);
54 	if (!etnaviv_domain->pgtable_cpu) {
55 		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
56 			    etnaviv_domain->base.bad_page_cpu,
57 			    etnaviv_domain->base.bad_page_dma);
58 		return -ENOMEM;
59 	}
60 
61 	memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
62 		 PT_ENTRIES);
63 
64 	return 0;
65 }
66 
67 static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
68 {
69 	struct etnaviv_iommuv1_domain *etnaviv_domain =
70 			to_etnaviv_domain(domain);
71 
72 	dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
73 		    etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
74 
75 	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
76 		    etnaviv_domain->base.bad_page_cpu,
77 		    etnaviv_domain->base.bad_page_dma);
78 
79 	kfree(etnaviv_domain);
80 }
81 
82 static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
83 			       unsigned long iova, phys_addr_t paddr,
84 			       size_t size, int prot)
85 {
86 	struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
87 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
88 
89 	if (size != SZ_4K)
90 		return -EINVAL;
91 
92 	etnaviv_domain->pgtable_cpu[index] = paddr;
93 
94 	return 0;
95 }
96 
/*
 * Unmap one 4K page by redirecting its PTE back at the poisoned scratch
 * page. Returns the number of bytes unmapped (always SZ_4K).
 *
 * NOTE(review): returning -EINVAL from a size_t-returning function yields
 * a huge positive value; presumably callers only compare the result
 * against the requested size — confirm before changing this convention.
 */
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
	unsigned long iova, size_t size)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	/* MMUv1 only handles 4K pages. */
	if (size != SZ_4K)
		return -EINVAL;

	/* Point the slot back at the scratch page instead of clearing it. */
	etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;

	return SZ_4K;
}
111 
/* Size of the state captured by etnaviv_iommuv1_dump(): the whole page table. */
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
{
	return PT_SIZE;
}
116 
117 static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
118 {
119 	struct etnaviv_iommuv1_domain *etnaviv_domain =
120 			to_etnaviv_domain(domain);
121 
122 	memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
123 }
124 
/*
 * Reprogram the MMUv1 hardware state (e.g. after reset/resume): the memory
 * window base for each GPU client and the page table address for each of
 * the per-client MMUs in the memory controller.
 */
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u32 pgtable;

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* set page table address in MC */
	pgtable = (u32)etnaviv_domain->pgtable_dma;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
147 
/* MMUv1 implementation of the generic etnaviv IOMMU domain operations. */
static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
	.free = etnaviv_iommuv1_domain_free,
	.map = etnaviv_iommuv1_map,
	.unmap = etnaviv_iommuv1_unmap,
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
};
155 
156 struct etnaviv_iommu_domain *
157 etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
158 {
159 	struct etnaviv_iommuv1_domain *etnaviv_domain;
160 	struct etnaviv_iommu_domain *domain;
161 	int ret;
162 
163 	etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
164 	if (!etnaviv_domain)
165 		return NULL;
166 
167 	domain = &etnaviv_domain->base;
168 
169 	domain->dev = gpu->dev;
170 	domain->base = GPU_MEM_START;
171 	domain->size = PT_ENTRIES * SZ_4K;
172 	domain->ops = &etnaviv_iommuv1_ops;
173 
174 	ret = __etnaviv_iommu_init(etnaviv_domain);
175 	if (ret)
176 		goto out_free;
177 
178 	return &etnaviv_domain->base;
179 
180 out_free:
181 	kfree(etnaviv_domain);
182 	return NULL;
183 }
184