// SPDX-License-Identifier: GPL-2.0
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-API-HOWTO.txt for interface definitions.
**
**      (c) Copyright 1999,2000 Hewlett-Packard Company
**      (c) Copyright 2000 Grant Grundler
**	(c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
**      (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>	/* get_order */
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

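/*
** Example (a sketch, not compiled): how a driver reaches this code
** through the generic DMA API.  The probe function below is
** hypothetical; dma_alloc_coherent()/dma_free_coherent() are the real
** entry points and land in arch_dma_alloc()/arch_dma_free() defined
** in this file.
*/
#if 0
#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)
{
	dma_addr_t handle;
	void *cpu_addr;

	/* On pcxl/pcxl2 this returns an uncached mapping via
	 * pcxl_dma_alloc(); on other PCX CPUs it fails, since only the
	 * non-consistent path is supported there. */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand 'handle' to the device, use 'cpu_addr' from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);
	return 0;
}
#endif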
static struct proc_dir_entry *proc_gsc_root __read_mostly;
static unsigned long pcxl_used_bytes __read_mostly;
static unsigned long pcxl_used_pages __read_mostly;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static DEFINE_SPINLOCK(pcxl_res_lock);
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;
#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

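/*
** map_pte_uncached()/map_pmd_uncached()/map_uncached_pages() walk the
** kernel page tables for a range in the pcxl dma region and install
** PAGE_KERNEL_UNC ptes, creating an uncached alias of the backing
** pages.  Each new pte is paired with a pdtlb_kernel() purge so that
** no stale (cached) translation survives.
*/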
static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		purge_tlb_start(flags);
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

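/*
** The unmap_uncached_*() helpers undo map_uncached_pages(): clear each
** pte in the range and purge the matching kernel TLB entry.
*/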
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

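/*
** The pcxl resource map is a bitmap with one bit per page of the dma
** region (bit set == page in use).  It is scanned in naturally aligned
** 8-, 16- or 32-bit chunks; an allocation claims the low-order
** pages_needed bits of the first chunk in which they are all clear,
** starting at pcxl_res_hint and wrapping to the start of the map once.
**
** Worked example: a 3-page allocation builds mask 0x7 and is scanned
** byte-by-byte; the first byte whose low three bits are clear is
** claimed, and res_idx becomes that byte's offset into the map (each
** map byte covers 8 pages).
*/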
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) \
       { \
               if(0 == ((*res_ptr) & mask)) { \
                       *res_ptr |= mask; \
		       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
		       pcxl_res_hint = idx + (size >> 3); \
                       goto resource_found; \
               } \
       }

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
       res_ptr = (u##size *)&pcxl_res_map[0]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
}

unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %zu pages_needed %u pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	/* The search macros jump to resource_found on success. */
	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** Return the corresponding vaddr in the pcxl dma map: res_idx is
	** a byte offset into the map, and each map byte covers 8 pages,
	** hence the shift by PAGE_SHIFT + 3.
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

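/*
** Clear an allocation's bits in the resource map.  Note that the
** offset term (((size >> 3) - 1) & (~((size >> 3) - 1))) is always
** zero (x & ~x == 0), so this simply addresses pcxl_res_map[idx].
*/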
#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		/* BUG_ON((*res_ptr & m) != m); */ \
		*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %u size: %zu pages_mapped %u mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

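/*
** Show usage of the pcxl dma region via /proc/gsc/pcxl_dma.
*/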
static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "     	  total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
		    seq_puts(m,"\n   ");
		seq_printf(m, " %08lx", *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	if (!pcxl_res_map)
		panic("pcxl_dma_init: unable to allocate resource map\n");
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = proc_create_single("pcxl_dma", 0, proc_gsc_root,
				proc_pcxl_dma_show);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);

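/*
** PCX-L/PCX-L2: reserve a slice of the pcxl dma region, back it with
** zeroed pages, flush those pages from the data cache, then alias them
** uncached at the reserved vaddr.  The dma handle is the physical
** address of the backing pages.
*/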
static void *pcxl_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag | __GFP_ZERO, order);
	if (!paddr) {
		pcxl_free_range(vaddr, size);
		return NULL;
	}
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || dev->coherent_dma_mask < 0xffffffff)
		flag |= GFP_DMA;
#endif
	return (void *)vaddr;
}

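/*
** Other PCX CPUs cannot create an uncached alias, so consistent
** memory is unavailable: only DMA_ATTR_NON_CONSISTENT allocations,
** where the caller syncs explicitly with dma_cache_sync(), are
** supported.
*/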
static void *pcx_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	void *addr;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
		return NULL;

	addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

void *arch_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
		return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
	else
		return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order = get_order(size);

	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
		size = 1 << (order + PAGE_SHIFT);
		unmap_uncached_pages((unsigned long)vaddr, size);
		pcxl_free_range((unsigned long)vaddr, size);

		vaddr = __va(dma_handle);
	}
	free_pages((unsigned long)vaddr, order);
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}

void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_kernel_dcache_range((unsigned long)vaddr, size);
}
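
/*
** Example (a sketch, not compiled): the non-consistent path that plain
** PCX machines support.  The buffer stays cacheable, so the
** (hypothetical) driver must bracket device access with
** dma_cache_sync(), which lands in arch_dma_cache_sync() above.
*/
#if 0
#include <linux/dma-mapping.h>

static void example_noncoherent(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	buf = dma_alloc_attrs(dev, PAGE_SIZE, &handle, GFP_KERNEL,
			      DMA_ATTR_NON_CONSISTENT);
	if (!buf)
		return;

	/* CPU writes, then make them visible to the device. */
	memset(buf, 0, PAGE_SIZE);
	dma_cache_sync(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);

	/* ... start DMA using 'handle' ... */

	dma_free_attrs(dev, PAGE_SIZE, buf, handle, DMA_ATTR_NON_CONSISTENT);
}
#endif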