xref: /linux/arch/mips/mm/dma-noncoherent.c (revision 52338415)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/dma-coherence.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware-maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However, this function is only called on non-I/O-coherent
 * systems, and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

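/*
 * Illustrative timeline of the hazard handled above (a sketch, not code
 * from this file) for a device-to-memory transfer:
 *
 *	arch_sync_dma_for_device(dev, paddr, size, DMA_FROM_DEVICE);
 *		// buffer cachelines invalidated before the transfer
 *	... device writes the buffer via DMA ...
 *		// a speculating CPU may meanwhile refill a cacheline
 *		// from the buffer, caching stale pre-DMA data
 *	arch_sync_dma_for_cpu(dev, paddr, size, DMA_FROM_DEVICE);
 *		// the post-DMA invalidate discards the speculatively
 *		// fetched line, so the CPU reads what the device wrote
 */
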
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

void *uncached_kernel_address(void *addr)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

void *cached_kernel_address(void *addr)
{
	return __va(addr) - UNCAC_BASE;
}

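/*
 * Worked example (hypothetical addresses, assuming the classic 32-bit
 * layout where the cached linear map is KSEG0 at 0x80000000 and
 * UNCAC_BASE is KSEG1 at 0xa0000000):
 *
 *	void *cached = (void *)0x81000000;	// maps physical 0x01000000
 *	void *uncached = uncached_kernel_address(cached);
 *		// __pa() -> 0x01000000, + UNCAC_BASE -> 0xa1000000
 *	void *again = cached_kernel_address(uncached);
 *		// logically __va(addr - UNCAC_BASE); since __va() just
 *		// adds PAGE_OFFSET, the operations commute -> 0x81000000
 */
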
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr)));
}

static inline void dma_sync_virt(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

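/*
 * Rationale sketch for the mapping above (explanatory, not from this
 * file): DMA_TO_DEVICE writes back dirty lines so the device reads the
 * CPU's data from RAM; DMA_FROM_DEVICE only invalidates, so stale lines
 * cannot mask what the device wrote; DMA_BIDIRECTIONAL must do both.
 * E.g. for a transmit buffer:
 *
 *	memset(buf, 0xff, len);			// dirties cachelines
 *	dma_sync_virt(buf, len, DMA_TO_DEVICE);	// written back to RAM, so
 *						// the device sees 0xff, not
 *						// whatever RAM held before
 */
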
/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			addr = kmap_atomic(page);
			dma_sync_virt(addr + offset, len, dir);
			kunmap_atomic(addr);
		} else
			/* lowmem is virtually contiguous: sync the rest */
			dma_sync_virt(page_address(page) + offset, len, dir);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

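/*
 * Worked example (hypothetical values, 4 KiB pages, first page in
 * highmem): paddr ends in 0xf00, size = 0x300.
 *
 *	pass 1: offset = 0xf00, len clamped to 0x100; kmap_atomic() the
 *		page and sync its final 0x100 bytes; left = 0x200
 *	pass 2: offset = 0, len = left = 0x200; the next page is synced
 *		through its linear-map (or kmapped) address; left = 0,
 *		so the loop terminates
 */
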
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush(dev))
		dma_sync_phys(paddr, size, dir);
}
#endif

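/*
 * Where these hooks fire (illustrative driver-side flow through the
 * generic DMA API; none of the calls below are defined in this file):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		// dma-direct invokes arch_sync_dma_for_device()
 *	... device DMAs into buf ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *		// dma-direct invokes arch_sync_dma_for_cpu(); only CPUs
 *		// matched by cpu_needs_post_dma_flush() pay for the
 *		// second invalidate
 */
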
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	dma_sync_virt(vaddr, size, direction);
}

#ifdef CONFIG_DMA_PERDEV_COHERENT
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif