xref: /linux/drivers/tty/serial/8250/8250_dma.c (revision 0be3ff0c)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * 8250_dma.c - DMA Engine API support for 8250.c
4  *
5  * Copyright (C) 2013 Intel Corporation
6  */
7 #include <linux/tty.h>
8 #include <linux/tty_flip.h>
9 #include <linux/serial_reg.h>
10 #include <linux/dma-mapping.h>
11 
12 #include "8250.h"
13 
14 static void __dma_tx_complete(void *param)
15 {
16 	struct uart_8250_port	*p = param;
17 	struct uart_8250_dma	*dma = p->dma;
18 	struct circ_buf		*xmit = &p->port.state->xmit;
19 	unsigned long	flags;
20 	int		ret;
21 
22 	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
23 				UART_XMIT_SIZE, DMA_TO_DEVICE);
24 
25 	spin_lock_irqsave(&p->port.lock, flags);
26 
27 	dma->tx_running = 0;
28 
29 	xmit->tail += dma->tx_size;
30 	xmit->tail &= UART_XMIT_SIZE - 1;
31 	p->port.icount.tx += dma->tx_size;
32 
33 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
34 		uart_write_wakeup(&p->port);
35 
36 	ret = serial8250_tx_dma(p);
37 	if (ret)
38 		serial8250_set_THRI(p);
39 
40 	spin_unlock_irqrestore(&p->port.lock, flags);
41 }
42 
/*
 * Completion handler for an RX DMA transfer: push whatever the engine
 * actually received into the tty flip buffer.
 *
 * Invoked in two ways: as the dmaengine completion callback, and
 * synchronously from serial8250_rx_dma_flush() after pausing the channel.
 *
 * NOTE(review): no lock is taken here although rx_running and the tty port
 * are touched from both paths above; presumably the callers/irq context
 * provide exclusion — verify against the interrupt handlers that invoke
 * serial8250_rx_dma_flush().
 */
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct tty_port		*tty_port = &p->port.state->port;
	struct dma_tx_state	state;
	int			count;

	dma->rx_running = 0;
	/* Query the residue so partial (paused/flushed) transfers are sized. */
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	/* residue = bytes of rx_buf the engine did NOT fill. */
	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}
61 
/*
 * Start (or continue) DMA transmission of the port's circular xmit buffer.
 *
 * Called under the port lock by __dma_tx_complete(); other callers
 * presumably hold it too — verify at the call sites.
 *
 * Returns 0 on success or when nothing needs to be done; a negative errno
 * (with dma->tx_err set) when no descriptor could be obtained, so the
 * caller can fall back to THRI-interrupt-driven PIO.
 */
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	struct uart_port		*up = &p->port;
	int ret;

	if (dma->tx_running) {
		if (up->x_char) {
			/*
			 * A transfer is in flight: pause it so the
			 * flow-control character (XON/XOFF) can go out
			 * through the FIFO first, then resume.
			 */
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		serial8250_rpm_put_tx(p);
		return 0;
	}

	/* Only transfer up to the wrap point; the callback handles the rest. */
	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	/* Mark busy before submitting so re-entrant calls take the path above. */
	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	/* Hand the xmit buffer over to the device before the engine reads it. */
	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err) {
		/* Recovered from an earlier failure: drop the PIO fallback. */
		dma->tx_err = 0;
		serial8250_clear_THRI(p);
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
117 
118 int serial8250_rx_dma(struct uart_8250_port *p)
119 {
120 	struct uart_8250_dma		*dma = p->dma;
121 	struct dma_async_tx_descriptor	*desc;
122 
123 	if (dma->rx_running)
124 		return 0;
125 
126 	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
127 					   dma->rx_size, DMA_DEV_TO_MEM,
128 					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
129 	if (!desc)
130 		return -EBUSY;
131 
132 	dma->rx_running = 1;
133 	desc->callback = __dma_rx_complete;
134 	desc->callback_param = p;
135 
136 	dma->rx_cookie = dmaengine_submit(desc);
137 
138 	dma_async_issue_pending(dma->rxchan);
139 
140 	return 0;
141 }
142 
143 void serial8250_rx_dma_flush(struct uart_8250_port *p)
144 {
145 	struct uart_8250_dma *dma = p->dma;
146 
147 	if (dma->rx_running) {
148 		dmaengine_pause(dma->rxchan);
149 		__dma_rx_complete(p);
150 		dmaengine_terminate_async(dma->rxchan);
151 	}
152 }
153 EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
154 
/*
 * Acquire and configure the RX and TX DMA channels and buffers for a port.
 *
 * The slave addresses default to the port's mapbase unless the platform
 * supplied explicit rx_dma_addr/tx_dma_addr overrides. On success both
 * channels are configured, the RX buffer is allocated coherently and the
 * xmit buffer is streaming-mapped for TX. On failure everything acquired
 * so far is released (goto-based unwind) and a negative errno is returned.
 */
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t		mask;
	struct dma_slave_caps	caps;
	int			ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	/*
	 * Descriptor-granularity residue is useless here: the flush path
	 * needs to know how many bytes of a partial transfer arrived.
	 */
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer: map the existing circular xmit buffer for streaming DMA */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->xmit.buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		/* RX buffer was already allocated above; undo it by hand */
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);
250 
251 void serial8250_release_dma(struct uart_8250_port *p)
252 {
253 	struct uart_8250_dma *dma = p->dma;
254 
255 	if (!dma)
256 		return;
257 
258 	/* Release RX resources */
259 	dmaengine_terminate_sync(dma->rxchan);
260 	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
261 			  dma->rx_addr);
262 	dma_release_channel(dma->rxchan);
263 	dma->rxchan = NULL;
264 
265 	/* Release TX resources */
266 	dmaengine_terminate_sync(dma->txchan);
267 	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
268 			 UART_XMIT_SIZE, DMA_TO_DEVICE);
269 	dma_release_channel(dma->txchan);
270 	dma->txchan = NULL;
271 	dma->tx_running = 0;
272 
273 	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
274 }
275 EXPORT_SYMBOL_GPL(serial8250_release_dma);
276