xref: /netbsd/sys/dev/marvell/gtmpsc.c (revision f041dd23)
1 /*	$NetBSD: gtmpsc.c,v 1.48 2022/11/02 20:38:22 andvar Exp $	*/
2 /*
3  * Copyright (c) 2009 KIYOHARA Takashi
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 /*
28  * mpsc.c - Multi-Protocol Serial Controller driver, supports UART mode only
29  */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: gtmpsc.c,v 1.48 2022/11/02 20:38:22 andvar Exp $");
33 
34 #include "opt_kgdb.h"
35 
36 #include <sys/param.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/device.h>
40 #include <sys/fcntl.h>
41 #include <sys/intr.h>
42 #include <sys/kauth.h>
43 #include <sys/kernel.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/systm.h>
47 #include <sys/timepps.h>
48 #include <sys/tty.h>
49 #ifdef KGDB
50 #include <sys/kgdb.h>
51 #endif
52 
53 #include <dev/cons.h>
54 
55 #include <dev/marvell/gtreg.h>
56 #include <dev/marvell/gtvar.h>
57 #include <dev/marvell/gtbrgreg.h>
58 #include <dev/marvell/gtbrgvar.h>
59 #include <dev/marvell/gtsdmareg.h>
60 #include <dev/marvell/gtsdmavar.h>
61 #include <dev/marvell/gtmpscreg.h>
62 #include <dev/marvell/gtmpscvar.h>
63 #include <dev/marvell/marvellreg.h>
64 #include <dev/marvell/marvellvar.h>
65 
66 #include "gtmpsc.h"
67 #include "ioconf.h"
68 #include "locators.h"
69 
70 /*
71  * Wait 2 characters time for RESET_DELAY
72  */
73 #define GTMPSC_RESET_DELAY	(2*8*1000000 / GT_MPSC_DEFAULT_BAUD_RATE)
74 
75 
76 #if defined(DEBUG)
77 unsigned int gtmpsc_debug = 0;
78 # define STATIC
79 # define DPRINTF(x)	do { if (gtmpsc_debug) printf x ; } while (0)
80 #else
81 # define STATIC static
82 # define DPRINTF(x)
83 #endif
84 
85 #define GTMPSCUNIT(x)      TTUNIT(x)
86 #define GTMPSCDIALOUT(x)   TTDIALOUT(x)
87 
/*
 * CLEANUP_AND_RETURN_RXDMA - reinitialize RX descriptor (ix) of (sc)
 * and hand it back to the SDMA engine: re-arm the ownership/interrupt
 * bits, restore the buffer-size field, then sync buffer and descriptor
 * for another receive.
 *
 * Fix: the buffer sync used sizeof(vrxp->rxbuf), silently capturing a
 * "vrxp" from the caller's scope instead of the macro-local _vrxp; the
 * stray trailing semicolon after while (0) is gone as well.
 */
#define CLEANUP_AND_RETURN_RXDMA(sc, ix)				    \
	do {								    \
		gtmpsc_pollrx_t *_vrxp = &(sc)->sc_poll_sdmapage->rx[(ix)]; \
									    \
		_vrxp->rxdesc.sdma_csr =				    \
		    SDMA_CSR_RX_L	|				    \
		    SDMA_CSR_RX_F	|				    \
		    SDMA_CSR_RX_OWN	|				    \
		    SDMA_CSR_RX_EI;					    \
		_vrxp->rxdesc.sdma_cnt =				    \
		    GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;		    \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map,	    \
		    (ix) * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),   \
		    sizeof(_vrxp->rxbuf),				    \
		    BUS_DMASYNC_PREREAD);				    \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_rxdma_map,	    \
		    (ix) * sizeof(gtmpsc_pollrx_t),			    \
		    sizeof(sdma_desc_t),				    \
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);	    \
	} while (0)
108 
109 
110 STATIC int  gtmpscmatch(device_t, cfdata_t, void *);
111 STATIC void gtmpscattach(device_t, device_t, void *);
112 
113 STATIC void gtmpsc_softintr(void *);
114 
115 STATIC void gtmpscstart(struct tty *);
116 STATIC int  gtmpscparam(struct tty *, struct termios *);
117 
118 STATIC void gtmpsc_shutdownhook(void *);
119 
120 STATIC uint32_t cflag2mpcr(tcflag_t);
121 STATIC __inline void gtmpsc_intr_rx(struct gtmpsc_softc *);
122 STATIC __inline void gtmpsc_intr_tx(struct gtmpsc_softc *);
123 STATIC void gtmpsc_write(struct gtmpsc_softc *);
124 STATIC void gtmpsc_txflush(gtmpsc_softc_t *);
125 STATIC void gtmpsc_rxdesc_init(struct gtmpsc_softc *);
126 STATIC void gtmpsc_txdesc_init(struct gtmpsc_softc *);
127 STATIC void gtmpscinit_stop(struct gtmpsc_softc *);
128 STATIC void gtmpscinit_start(struct gtmpsc_softc *);
129 STATIC void gtmpscshutdown(struct gtmpsc_softc *);
130 STATIC void gtmpsc_loadchannelregs(struct gtmpsc_softc *);
131 
132 #ifdef MPSC_CONSOLE
133 STATIC int gtmpsccngetc(dev_t);
134 STATIC void gtmpsccnputc(dev_t, int);
135 STATIC void gtmpsccnpollc(dev_t, int);
136 STATIC void gtmpsccnhalt(dev_t);
137 
138 STATIC int gtmpsc_hackinit(struct gtmpsc_softc *, bus_space_tag_t,
139 			   bus_dma_tag_t, bus_addr_t, int, int, int, tcflag_t);
140 #endif
141 
142 #if defined(MPSC_CONSOLE) || defined(KGDB)
143 STATIC int  gtmpsc_common_getc(struct gtmpsc_softc *);
144 STATIC void gtmpsc_common_putc(struct gtmpsc_softc *, int);
145 STATIC void gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *, int);
146 #endif
147 
148 dev_type_open(gtmpscopen);
149 dev_type_close(gtmpscclose);
150 dev_type_read(gtmpscread);
151 dev_type_write(gtmpscwrite);
152 dev_type_ioctl(gtmpscioctl);
153 dev_type_stop(gtmpscstop);
154 dev_type_tty(gtmpsctty);
155 dev_type_poll(gtmpscpoll);
156 
/*
 * Character-device switch: standard TTY entry points for this driver.
 * mmap is unsupported; kqueue support comes from the generic tty layer.
 */
const struct cdevsw gtmpsc_cdevsw = {
	.d_open = gtmpscopen,
	.d_close = gtmpscclose,
	.d_read = gtmpscread,
	.d_write = gtmpscwrite,
	.d_ioctl = gtmpscioctl,
	.d_stop = gtmpscstop,
	.d_tty = gtmpsctty,
	.d_poll = gtmpscpoll,
	.d_mmap = nommap,
	.d_kqfilter = ttykqfilter,
	.d_discard = nodiscard,
	.d_flag = D_TTY
};
171 
172 CFATTACH_DECL_NEW(gtmpsc, sizeof(struct gtmpsc_softc),
173     gtmpscmatch, gtmpscattach, NULL, NULL);
174 
175 
176 STATIC uint32_t sdma_imask;		/* soft copy of SDMA IMASK reg */
177 STATIC struct cnm_state gtmpsc_cnm_state;
178 
179 #ifdef KGDB
180 static int gtmpsc_kgdb_addr;
181 static int gtmpsc_kgdb_attached;
182 
183 STATIC int      gtmpsc_kgdb_getc(void *);
184 STATIC void     gtmpsc_kgdb_putc(void *, int);
185 #endif /* KGDB */
186 
187 #ifdef MPSC_CONSOLE
188 /*
189  * hacks for console initialization
190  * which happens prior to autoconfig "attach"
191  *
192  * XXX Assumes PAGE_SIZE is a constant!
193  */
194 gtmpsc_softc_t gtmpsc_cn_softc;
195 STATIC unsigned char gtmpsc_cn_dmapage[PAGE_SIZE] __aligned(PAGE_SIZE);
196 
197 
198 static struct consdev gtmpsc_consdev = {
199 	NULL, NULL, gtmpsccngetc, gtmpsccnputc, gtmpsccnpollc,
200 	NULL, gtmpsccnhalt, NULL, NODEV, CN_NORMAL
201 };
202 #endif
203 
204 
/* 32-bit register accessors for the MPSC and SDMA register windows */
#define GT_MPSC_READ(sc, o) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_mpsch, (o))
#define GT_MPSC_WRITE(sc, o, v) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_mpsch, (o), (v))
#define GT_SDMA_READ(sc, o) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_sdmah, (o))
#define GT_SDMA_WRITE(sc, o, v) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_sdmah, (o), (v))
213 
214 
215 /* ARGSUSED */
216 STATIC int
gtmpscmatch(device_t parent,cfdata_t match,void * aux)217 gtmpscmatch(device_t parent, cfdata_t match, void *aux)
218 {
219 	struct marvell_attach_args *mva = aux;
220 
221 	if (strcmp(mva->mva_name, match->cf_name) != 0)
222 		return 0;
223 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
224 		return 0;
225 
226 	mva->mva_size = GTMPSC_SIZE;
227 	return 1;
228 }
229 
/*
 * Autoconf attach: map the MPSC/SDMA register windows, allocate one
 * page of DMA-safe memory for the TX/RX descriptor rings and buffers,
 * set up the tty, soft interrupt and shutdown hook, and initialize the
 * hardware.  If this channel is already the console, adopt the state
 * prepared by the pre-autoconf console hack instead.
 */
/* ARGSUSED */
STATIC void
gtmpscattach(device_t parent, device_t self, void *aux)
{
	struct gtmpsc_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	bus_dma_segment_t segs;
	struct tty *tp;
	int rsegs, err, unit;
	void *kva;

	aprint_naive("\n");
	aprint_normal(": Multi-Protocol Serial Controller\n");

	/* Channel number: explicit from the attach args, or derived
	 * from the register offset. */
	if (mva->mva_unit != MVA_UNIT_DEFAULT)
		unit = mva->mva_unit;
	else
		unit = (mva->mva_offset == GTMPSC_BASE(0)) ? 0 : 1;

#ifdef MPSC_CONSOLE
	/*
	 * Console channel: gtmpsc_hackinit() already set up register
	 * handles, DMA page and baudrate in gtmpsc_cn_softc; copy that
	 * state over rather than mapping/allocating anew.
	 */
	if (cn_tab == &gtmpsc_consdev &&
	    cn_tab->cn_dev == makedev(0, unit)) {
		gtmpsc_cn_softc.sc_dev = self;
		memcpy(sc, &gtmpsc_cn_softc, sizeof(struct gtmpsc_softc));
		sc->sc_flags = GTMPSC_CONSOLE;
	} else
#endif
	{
		/* Map the MPSC and per-unit SDMA register windows. */
		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
		    mva->mva_offset, mva->mva_size, &sc->sc_mpsch)) {
			aprint_error_dev(self, "Cannot map MPSC registers\n");
			return;
		}
		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
		    GTSDMA_BASE(unit), GTSDMA_SIZE, &sc->sc_sdmah)) {
			aprint_error_dev(self, "Cannot map SDMA registers\n");
			return;
		}
		sc->sc_dev = self;
		sc->sc_unit = unit;
		sc->sc_iot = mva->mva_iot;
		sc->sc_dmat = mva->mva_dmat;

		/*
		 * One DMA-safe page holds both descriptor rings and
		 * their data buffers (sc_poll_sdmapage).
		 */
		err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
		    &segs, 1, &rsegs, BUS_DMA_NOWAIT);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamem_alloc error 0x%x\n", err);
			goto fail0;
		}
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE, &kva,
		    BUS_DMA_NOWAIT);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamem_map error 0x%x\n", err);
			goto fail1;
		}
		memset(kva, 0, PAGE_SIZE);	/* paranoid/superfluous */
		sc->sc_poll_sdmapage = kva;

		/* Create/load DMA maps for the TX and RX ring areas. */
		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_polltx_t), 1,
		   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT,
		   &sc->sc_txdma_map);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_create error 0x%x\n", err);
			goto fail2;
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->sc_txdma_map,
		    sc->sc_poll_sdmapage->tx, sizeof(gtmpsc_polltx_t),
		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_load tx error 0x%x\n", err);
			goto fail3;
		}
		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_pollrx_t), 1,
		   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
		   &sc->sc_rxdma_map);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_create rx error 0x%x\n", err);
			goto fail4;
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdma_map,
		    sc->sc_poll_sdmapage->rx, sizeof(gtmpsc_pollrx_t),
		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_load rx error 0x%x\n", err);
			goto fail5;
		}

		sc->sc_brg = unit;		/* XXXXX */
		sc->sc_baudrate = GT_MPSC_DEFAULT_BAUD_RATE;
	}
	aprint_normal_dev(self, "with SDMA offset 0x%04x-0x%04x\n",
	    GTSDMA_BASE(unit), GTSDMA_BASE(unit) + GTSDMA_SIZE - 1);

	/* Reset software state before touching the rings. */
	sc->sc_rx_ready = 0;
	sc->sc_tx_busy = 0;
	sc->sc_tx_done = 0;
	sc->sc_tx_stopped = 0;
	sc->sc_heldchange = 0;

	gtmpsc_txdesc_init(sc);
	gtmpsc_rxdesc_init(sc);

	sc->sc_tty = tp = tty_alloc();
	tp->t_oproc = gtmpscstart;
	tp->t_param = gtmpscparam;
	tty_attach(tp);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);

	/*
	 * clear any pending SDMA interrupts for this unit
	 */
	(void) gt_sdma_icause(device_parent(sc->sc_dev),
	    SDMA_INTR_RXBUF(sc->sc_unit) |
	    SDMA_INTR_RXERR(sc->sc_unit) |
	    SDMA_INTR_TXBUF(sc->sc_unit) |
	    SDMA_INTR_TXEND(sc->sc_unit));

	sc->sc_si = softint_establish(SOFTINT_SERIAL, gtmpsc_softintr, sc);
	if (sc->sc_si == NULL)
		panic("mpscattach: cannot softint_establish IPL_SOFTSERIAL");

	/* Drain pending TX output at system shutdown. */
	shutdownhook_establish(gtmpsc_shutdownhook, sc);

	gtmpscinit_stop(sc);
	gtmpscinit_start(sc);

	if (sc->sc_flags & GTMPSC_CONSOLE) {
		int maj;

		/* locate the major number */
		maj = cdevsw_lookup_major(&gtmpsc_cdevsw);

		tp->t_dev = cn_tab->cn_dev =
		    makedev(maj, device_unit(sc->sc_dev));

		aprint_normal_dev(self, "console\n");
	}

#ifdef KGDB
	/*
	 * Allow kgdb to "take over" this port.  If this is
	 * the kgdb device, it has exclusive use.
	 */
	if (sc->sc_unit == gtmpsckgdbport) {
#ifdef MPSC_CONSOLE
		if (sc->sc_unit == MPSC_CONSOLE) {
			aprint_error_dev(self,
			    "(kgdb): cannot share with console\n");
			return;
		}
#endif

		sc->sc_flags |= GTMPSC_KGDB;
		aprint_normal_dev(self, "kgdb\n");

		gtmpsc_txflush(sc);

		kgdb_attach(gtmpsc_kgdb_getc, gtmpsc_kgdb_putc, NULL);
		kgdb_dev = 123;	/* unneeded, only to satisfy some tests */
		gtmpsc_kgdb_attached = 1;
		kgdb_connect(1);
	}
#endif /* KGDB */

	return;


	/* Error unwind: release DMA resources in reverse order. */
fail5:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdma_map);
fail4:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_txdma_map);
fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdma_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
fail1:
	bus_dmamem_free(sc->sc_dmat, &segs, 1);
fail0:
	return;
}
417 
/*
 * Hardware interrupt handler, shared by all MPSC channels: fetch the
 * SDMA interrupt cause bits (masked by the soft copy sdma_imask) and
 * dispatch RX/TX service to every attached unit under its spin lock.
 * Always claims the interrupt (returns 1).
 */
/* ARGSUSED */
int
gtmpsc_intr(void *arg)
{
	struct gt_softc *gt = (struct gt_softc *)arg;
	struct gtmpsc_softc *sc;
	uint32_t icause;
	int i;

	icause = gt_sdma_icause(gt->sc_dev, sdma_imask);

	for (i = 0; i < GTMPSC_NCHAN; i++) {
		sc = device_lookup_private(&gtmpsc_cd, i);
		if (sc == NULL)
			continue;
		mutex_spin_enter(&sc->sc_lock);
		if (icause & SDMA_INTR_RXBUF(sc->sc_unit)) {
			gtmpsc_intr_rx(sc);
			icause &= ~SDMA_INTR_RXBUF(sc->sc_unit);
		}
		if (icause & SDMA_INTR_TXBUF(sc->sc_unit)) {
			gtmpsc_intr_tx(sc);
			icause &= ~SDMA_INTR_TXBUF(sc->sc_unit);
		}
		mutex_spin_exit(&sc->sc_lock);
	}

	return 1;
}
447 
448 STATIC void
gtmpsc_softintr(void * arg)449 gtmpsc_softintr(void *arg)
450 {
451 	struct gtmpsc_softc *sc = arg;
452 	struct tty *tp = sc->sc_tty;
453 	gtmpsc_pollrx_t *vrxp;
454 	int code;
455 	u_int cc;
456 	u_char *get, *end, lsr;
457 	int (*rint)(int, struct tty *) = tp->t_linesw->l_rint;
458 
459 	if (sc->sc_rx_ready) {
460 		sc->sc_rx_ready = 0;
461 
462 		cc = sc->sc_rcvcnt;
463 
464 		/* If not yet open, drop the entire buffer content here */
465 		if (!ISSET(tp->t_state, TS_ISOPEN))
466 			cc = 0;
467 
468 		vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
469 		end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
470 		get = vrxp->rxbuf + sc->sc_roffset;
471 		while (cc > 0) {
472 			code = *get;
473 			lsr = vrxp->rxdesc.sdma_csr;
474 
475 			if (ISSET(lsr,
476 			    SDMA_CSR_RX_PE |
477 			    SDMA_CSR_RX_FR |
478 			    SDMA_CSR_RX_OR |
479 			    SDMA_CSR_RX_BR)) {
480 				if (ISSET(lsr, SDMA_CSR_RX_OR))
481 					;	/* XXXXX not yet... */
482 				if (ISSET(lsr, SDMA_CSR_RX_BR | SDMA_CSR_RX_FR))
483 					SET(code, TTY_FE);
484 				if (ISSET(lsr, SDMA_CSR_RX_PE))
485 					SET(code, TTY_PE);
486 			}
487 
488 			if ((*rint)(code, tp) == -1) {
489 				/*
490 				 * The line discipline's buffer is out of space.
491 				 */
492 				/* XXXXX not yet... */
493 			}
494 			if (++get >= end) {
495 				/* cleanup this descriptor, and return to DMA */
496 				CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
497 				sc->sc_rcvrx =
498 				    (sc->sc_rcvrx + 1) % GTMPSC_NTXDESC;
499 				vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
500 				end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
501 				get = vrxp->rxbuf + sc->sc_roffset;
502 			}
503 			cc--;
504 		}
505 	}
506 	if (sc->sc_tx_done) {
507 		sc->sc_tx_done = 0;
508 		CLR(tp->t_state, TS_BUSY);
509 		if (ISSET(tp->t_state, TS_FLUSH))
510 		    CLR(tp->t_state, TS_FLUSH);
511 		else
512 		    ndflush(&tp->t_outq, (int)(sc->sc_tba - tp->t_outq.c_cf));
513 		(*tp->t_linesw->l_start)(tp);
514 	}
515 }
516 
/*
 * Device open: on first open, enable the RX SDMA interrupt, reset PPS
 * capture state, program default/console line parameters, clear the
 * ring indices and start SDMA receive; then hand off to ttyopen() and
 * the line discipline.  Returns ENXIO for a nonexistent unit, EBUSY
 * for the kgdb port or an unauthorized exclusive tty.
 */
int
gtmpscopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct gtmpsc_softc *sc;
	int unit = GTMPSCUNIT(dev);
	struct tty *tp;
	int s;
	int error;

	sc = device_lookup_private(&gtmpsc_cd, unit);
	if (!sc)
		return ENXIO;
#ifdef KGDB
	/*
	 * If this is the kgdb port, no other use is permitted.
	 */
	if (sc->sc_flags & GTMPSC_KGDB)
		return EBUSY;
#endif
	tp = sc->sc_tty;
	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
		return EBUSY;

	s = spltty();

	/* First open (not already open, nobody waiting for carrier). */
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		struct termios t;

		tp->t_dev = dev;

		mutex_spin_enter(&sc->sc_lock);

		/* Turn on interrupts. */
		sdma_imask |= SDMA_INTR_RXBUF(sc->sc_unit);
		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);

		/* Clear PPS capture state on first open. */
		mutex_spin_enter(&timecounter_lock);
		memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
		sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
		pps_init(&sc->sc_pps_state);
		mutex_spin_exit(&timecounter_lock);

		mutex_spin_exit(&sc->sc_lock);

		/* Console keeps its current speed/flags; otherwise use
		 * the compiled-in tty defaults. */
		if (sc->sc_flags & GTMPSC_CONSOLE) {
			t.c_ospeed = sc->sc_baudrate;
			t.c_cflag = sc->sc_cflag;
		} else {
			t.c_ospeed = TTYDEF_SPEED;
			t.c_cflag = TTYDEF_CFLAG;
		}
		t.c_ispeed = t.c_ospeed;

		/* Make sure gtmpscparam() will do something. */
		tp->t_ospeed = 0;
		(void) gtmpscparam(tp, &t);
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_lflag = TTYDEF_LFLAG;
		ttychars(tp);
		ttsetwater(tp);

		mutex_spin_enter(&sc->sc_lock);

		/* Clear the input/output ring */
		sc->sc_rcvcnt = 0;
		sc->sc_roffset = 0;
		sc->sc_rcvrx = 0;
		sc->sc_rcvdrx = 0;
		sc->sc_nexttx = 0;
		sc->sc_lasttx = 0;

		/*
		 * enable SDMA receive
		 */
		GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);

		mutex_spin_exit(&sc->sc_lock);
	}
	splx(s);
	error = ttyopen(tp, GTMPSCDIALOUT(dev), ISSET(flag, O_NONBLOCK));
	if (error)
		goto bad;

	error = (*tp->t_linesw->l_open)(dev, tp);
	if (error)
		goto bad;

	return 0;

bad:
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		/*
		 * We failed to open the device, and nobody else had it opened.
		 * Clean up the state as appropriate.
		 */
		gtmpscshutdown(sc);
	}

	return error;
}
619 
620 int
gtmpscclose(dev_t dev,int flag,int mode,struct lwp * l)621 gtmpscclose(dev_t dev, int flag, int mode, struct lwp *l)
622 {
623 	int unit = GTMPSCUNIT(dev);
624 	struct gtmpsc_softc *sc = device_lookup_private(&gtmpsc_cd, unit);
625 	struct tty *tp = sc->sc_tty;
626 
627 	if (!ISSET(tp->t_state, TS_ISOPEN))
628 		return 0;
629 
630 	(*tp->t_linesw->l_close)(tp, flag);
631 	ttyclose(tp);
632 
633 	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
634 		/*
635 		 * Although we got a last close, the device may still be in
636 		 * use; e.g. if this was the dialout node, and there are still
637 		 * processes waiting for carrier on the non-dialout node.
638 		 */
639 		gtmpscshutdown(sc);
640 	}
641 
642 	return 0;
643 }
644 
645 int
gtmpscread(dev_t dev,struct uio * uio,int flag)646 gtmpscread(dev_t dev, struct uio *uio, int flag)
647 {
648 	struct gtmpsc_softc *sc =
649 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
650 	struct tty *tp = sc->sc_tty;
651 
652 	return (*tp->t_linesw->l_read)(tp, uio, flag);
653 }
654 
655 int
gtmpscwrite(dev_t dev,struct uio * uio,int flag)656 gtmpscwrite(dev_t dev, struct uio *uio, int flag)
657 {
658 	struct gtmpsc_softc *sc =
659 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
660 	struct tty *tp = sc->sc_tty;
661 
662 	return (*tp->t_linesw->l_write)(tp, uio, flag);
663 }
664 
/*
 * Device ioctl: try the line discipline, then the generic tty layer;
 * anything they pass through is handled here (PPS API and the legacy
 * TIOCDCDTIMESTAMP) under the softc spin lock, with the timecounter
 * lock taken for PPS state.  TIOCSFLAGS requires tty privilege.
 */
int
gtmpscioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
	struct tty *tp = sc->sc_tty;
	int error;

	error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, l);
	if (error != EPASSTHROUGH)
		return error;

	error = ttioctl(tp, cmd, data, flag, l);
	if (error != EPASSTHROUGH)
		return error;

	error = 0;
	switch (cmd) {
	case TIOCSFLAGS:
		/* Setting device flags is a privileged operation. */
		error = kauth_authorize_device_tty(l->l_cred,
		    KAUTH_DEVICE_TTY_PRIVSET, tp);
		if (error)
			return error;
		break;
	default:
		/* nothing */
		break;
	}

	mutex_spin_enter(&sc->sc_lock);

	switch (cmd) {
	case PPS_IOC_CREATE:
	case PPS_IOC_DESTROY:
	case PPS_IOC_GETPARAMS:
	case PPS_IOC_SETPARAMS:
	case PPS_IOC_GETCAP:
	case PPS_IOC_FETCH:
#ifdef PPS_SYNC
	case PPS_IOC_KCBIND:
#endif
		mutex_spin_enter(&timecounter_lock);
		error = pps_ioctl(cmd, data, &sc->sc_pps_state);
		mutex_spin_exit(&timecounter_lock);
		break;

	case TIOCDCDTIMESTAMP:	/* XXX old, overloaded  API used by xntpd v3 */
		mutex_spin_enter(&timecounter_lock);
#ifndef PPS_TRAILING_EDGE
		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
		    &sc->sc_pps_state.ppsinfo.assert_timestamp);
#else
		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
		    &sc->sc_pps_state.ppsinfo.clear_timestamp);
#endif
		mutex_spin_exit(&timecounter_lock);
		break;

	default:
		error = EPASSTHROUGH;
		break;
	}

	mutex_spin_exit(&sc->sc_lock);

	return error;
}
732 
/*
 * gtmpscstop - stop output.  Intentionally a no-op for this driver.
 */
void
gtmpscstop(struct tty *tp, int flag)
{

	/* nothing to do */
}
737 
738 struct tty *
gtmpsctty(dev_t dev)739 gtmpsctty(dev_t dev)
740 {
741 	struct gtmpsc_softc *sc =
742 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
743 
744 	return sc->sc_tty;
745 }
746 
747 int
gtmpscpoll(dev_t dev,int events,struct lwp * l)748 gtmpscpoll(dev_t dev, int events, struct lwp *l)
749 {
750 	struct gtmpsc_softc *sc =
751 	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
752 	struct tty *tp = sc->sc_tty;
753 
754 	return (*tp->t_linesw->l_poll)(tp, events, l);
755 }
756 

/*
 * tty output start routine (tp->t_oproc): grab the first contiguous
 * chunk of the output queue, mark the tty busy, enable the TX SDMA
 * interrupt and start DMA via gtmpsc_write().  Runs at spltty; the
 * DMA state is updated under the softc spin lock.
 */
STATIC void
gtmpscstart(struct tty *tp)
{
	struct gtmpsc_softc *sc;
	unsigned char *tba;
	unsigned int unit;
	int s, tbc;

	unit = GTMPSCUNIT(tp->t_dev);
	sc = device_lookup_private(&gtmpsc_cd, unit);
	if (sc == NULL)
		return;

	s = spltty();
	/* Bail if output is already in progress, stopped, or there is
	 * nothing to send. */
	if (ISSET(tp->t_state, TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
		goto out;
	if (sc->sc_tx_stopped)
		goto out;
	if (!ttypull(tp))
		goto out;

	/* Grab the first contiguous region of buffer space. */
	tba = tp->t_outq.c_cf;
	tbc = ndqb(&tp->t_outq, 0);

	mutex_spin_enter(&sc->sc_lock);

	sc->sc_tba = tba;
	sc->sc_tbc = tbc;

	/* Enable the TX-buffer interrupt and kick off DMA. */
	sdma_imask |= SDMA_INTR_TXBUF(sc->sc_unit);
	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
	SET(tp->t_state, TS_BUSY);
	sc->sc_tx_busy = 1;
	gtmpsc_write(sc);

	mutex_spin_exit(&sc->sc_lock);
out:
	splx(s);
}
798 
/*
 * tty parameter routine (tp->t_param): validate the requested speed
 * (split RX/TX speeds are not supported), store the new settings and
 * reprogram the channel — deferred to TX-interrupt time if a transmit
 * is in flight.  Always reports carrier on (no modem control lines).
 */
STATIC int
gtmpscparam(struct tty *tp, struct termios *t)
{
	struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(tp->t_dev));

	/* Check requested parameters. */
	if (compute_cdv(t->c_ospeed) < 0)
		return EINVAL;
	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
		return EINVAL;

	/*
	 * If there were no changes, don't do anything.  This avoids dropping
	 * input and improves performance when all we did was frob things like
	 * VMIN and VTIME.
	 */
	if (tp->t_ospeed == t->c_ospeed &&
	    tp->t_cflag == t->c_cflag)
		return 0;

	mutex_spin_enter(&sc->sc_lock);

	/* And copy to tty. */
	tp->t_ispeed = 0;
	tp->t_ospeed = t->c_ospeed;
	tp->t_cflag = t->c_cflag;

	sc->sc_baudrate = t->c_ospeed;

	if (!sc->sc_heldchange) {
		if (sc->sc_tx_busy) {
			/* Defer the register update until the current
			 * transmission completes (see gtmpsc_intr_tx). */
			sc->sc_heldtbc = sc->sc_tbc;
			sc->sc_tbc = 0;
			sc->sc_heldchange = 1;
		} else
			gtmpsc_loadchannelregs(sc);
	}

	mutex_spin_exit(&sc->sc_lock);

	/* Fake carrier on */
	(void) (*tp->t_linesw->l_modem)(tp, 1);

	return 0;
}
845 
846 void
gtmpsc_shutdownhook(void * arg)847 gtmpsc_shutdownhook(void *arg)
848 {
849 	gtmpsc_softc_t *sc = (gtmpsc_softc_t *)arg;
850 
851 	gtmpsc_txflush(sc);
852 }
853 
854 /*
855  * Convert to MPCR from cflag(CS[5678] and CSTOPB).
856  */
857 STATIC uint32_t
cflag2mpcr(tcflag_t cflag)858 cflag2mpcr(tcflag_t cflag)
859 {
860 	uint32_t mpcr = 0;
861 
862 	switch (ISSET(cflag, CSIZE)) {
863 	case CS5:
864 		SET(mpcr, GTMPSC_MPCR_CL_5);
865 		break;
866 	case CS6:
867 		SET(mpcr, GTMPSC_MPCR_CL_6);
868 		break;
869 	case CS7:
870 		SET(mpcr, GTMPSC_MPCR_CL_7);
871 		break;
872 	case CS8:
873 		SET(mpcr, GTMPSC_MPCR_CL_8);
874 		break;
875 	}
876 	if (ISSET(cflag, CSTOPB))
877 		SET(mpcr, GTMPSC_MPCR_SBL_2);
878 
879 	return mpcr;
880 }
881 
882 STATIC void
gtmpsc_intr_rx(struct gtmpsc_softc * sc)883 gtmpsc_intr_rx(struct gtmpsc_softc *sc)
884 {
885 	gtmpsc_pollrx_t *vrxp;
886 	uint32_t csr;
887 	int kick, ix;
888 
889 	kick = 0;
890 
891 	/* already handled in gtmpsc_common_getc() */
892 	if (sc->sc_rcvdrx == sc->sc_rcvrx)
893 		return;
894 
895 	ix = sc->sc_rcvdrx;
896 	vrxp = &sc->sc_poll_sdmapage->rx[ix];
897 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
898 	    ix * sizeof(gtmpsc_pollrx_t),
899 	    sizeof(sdma_desc_t),
900 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
901 	csr = vrxp->rxdesc.sdma_csr;
902 	while (!(csr & SDMA_CSR_RX_OWN)) {
903 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
904 		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
905 		    sizeof(vrxp->rxbuf),
906 		    BUS_DMASYNC_POSTREAD);
907 		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
908 		if (vrxp->rxdesc.sdma_csr & SDMA_CSR_RX_BR) {
909 			int cn_trapped = 0;
910 
911 			cn_check_magic(sc->sc_tty->t_dev,
912 			    CNC_BREAK, gtmpsc_cnm_state);
913 			if (cn_trapped)
914 				continue;
915 #if defined(KGDB) && !defined(DDB)
916 			if (ISSET(sc->sc_flags, GTMPSC_KGDB)) {
917 				kgdb_connect(1);
918 				continue;
919 			}
920 #endif
921 		}
922 
923 		sc->sc_rcvcnt += vrxp->rxdesc.sdma_cnt;
924 		kick = 1;
925 
926 		ix = (ix + 1) % GTMPSC_NTXDESC;
927 		vrxp = &sc->sc_poll_sdmapage->rx[ix];
928 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
929 		    ix * sizeof(gtmpsc_pollrx_t),
930 		    sizeof(sdma_desc_t),
931 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
932 		csr = vrxp->rxdesc.sdma_csr;
933 	}
934 	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
935 	    ix * sizeof(gtmpsc_pollrx_t),
936 	    sizeof(sdma_desc_t),
937 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
938 
939 	if (kick) {
940 		sc->sc_rcvdrx = ix;
941 		sc->sc_rx_ready = 1;
942 		softint_schedule(sc->sc_si);
943 	}
944 }
945 
/*
 * TX interrupt service: apply any deferred parameter change, reclaim
 * descriptors the SDMA engine has finished with (advancing sc_lasttx),
 * queue more output, and when everything is sent disable the TX
 * interrupt and schedule completion handling.  Called with sc_lock
 * held.
 */
STATIC __inline void
gtmpsc_intr_tx(struct gtmpsc_softc *sc)
{
	gtmpsc_polltx_t *vtxp;
	uint32_t csr;
	int ix;

	/*
	 * If we've delayed a parameter change, do it now,
	 * and restart output.
	 */
	if (sc->sc_heldchange) {
		gtmpsc_loadchannelregs(sc);
		sc->sc_heldchange = 0;
		sc->sc_tbc = sc->sc_heldtbc;
		sc->sc_heldtbc = 0;
	}

	/* Clean-up TX descriptors and buffers */
	ix = sc->sc_lasttx;
	while (ix != sc->sc_nexttx) {
		vtxp = &sc->sc_poll_sdmapage->tx[ix];
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		csr = vtxp->txdesc.sdma_csr;
		if (csr & SDMA_CSR_TX_OWN) {
			/* Still owned by the engine: stop cleaning. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
			    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
		    sizeof(vtxp->txbuf), BUS_DMASYNC_POSTWRITE);
		ix = (ix + 1) % GTMPSC_NTXDESC;
	}
	sc->sc_lasttx = ix;

	/* Output the next chunk of the contiguous buffer */
	gtmpsc_write(sc);
	if (sc->sc_tbc == 0 && sc->sc_tx_busy) {
		/* All queued; disable TX interrupts and hand completion
		 * to the soft interrupt. */
		sc->sc_tx_busy = 0;
		sc->sc_tx_done = 1;
		softint_schedule(sc->sc_si);
		sdma_imask &= ~SDMA_INTR_TXBUF(sc->sc_unit);
		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
	}
}
995 
996 /*
997  * gtmpsc_write - write a buffer into the hardware
998  */
999 STATIC void
gtmpsc_write(struct gtmpsc_softc * sc)1000 gtmpsc_write(struct gtmpsc_softc *sc)
1001 {
1002 	gtmpsc_polltx_t *vtxp;
1003 	uint32_t sdcm, ix;
1004 	int kick, n;
1005 
1006 	kick = 0;
1007 	while (sc->sc_tbc > 0 && sc->sc_nexttx != sc->sc_lasttx) {
1008 		n = uimin(sc->sc_tbc, GTMPSC_TXBUFSZ);
1009 
1010 		ix = sc->sc_nexttx;
1011 		sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
1012 
1013 		vtxp = &sc->sc_poll_sdmapage->tx[ix];
1014 
1015 		memcpy(vtxp->txbuf, sc->sc_tba, n);
1016 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1017 		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
1018 		    sizeof(vtxp->txbuf), BUS_DMASYNC_PREWRITE);
1019 
1020 		vtxp->txdesc.sdma_cnt = (n << SDMA_TX_CNT_BCNT_SHIFT) | n;
1021 		vtxp->txdesc.sdma_csr =
1022 		    SDMA_CSR_TX_L	|
1023 		    SDMA_CSR_TX_F	|
1024 		    SDMA_CSR_TX_EI	|
1025 		    SDMA_CSR_TX_OWN;
1026 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1027 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1028 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1029 
1030 		sc->sc_tbc -= n;
1031 		sc->sc_tba += n;
1032 		kick = 1;
1033 	}
1034 	if (kick) {
1035 		/*
1036 		 * now kick some SDMA
1037 		 */
1038 		sdcm = GT_SDMA_READ(sc, SDMA_SDCM);
1039 		if ((sdcm & SDMA_SDCM_TXD) == 0)
1040 			GT_SDMA_WRITE(sc, SDMA_SDCM, sdcm | SDMA_SDCM_TXD);
1041 	}
1042 }
1043 
1044 /*
1045  * gtmpsc_txflush - wait for output to drain
1046  */
1047 STATIC void
gtmpsc_txflush(gtmpsc_softc_t * sc)1048 gtmpsc_txflush(gtmpsc_softc_t *sc)
1049 {
1050 	gtmpsc_polltx_t *vtxp;
1051 	int ix, limit = 4000000;	/* 4 seconds */
1052 
1053 	ix = sc->sc_nexttx - 1;
1054 	if (ix < 0)
1055 		ix = GTMPSC_NTXDESC - 1;
1056 
1057 	vtxp = &sc->sc_poll_sdmapage->tx[ix];
1058 	while (limit > 0) {
1059 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1060 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1061 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1062 		if ((vtxp->txdesc.sdma_csr & SDMA_CSR_TX_OWN) == 0)
1063 			break;
1064 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
1065 		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
1066 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1067 		DELAY(1);
1068 		limit -= 1;
1069 	}
1070 }
1071 
1072 /*
1073  * gtmpsc_rxdesc_init - set up RX descriptor ring
1074  */
1075 STATIC void
gtmpsc_rxdesc_init(struct gtmpsc_softc * sc)1076 gtmpsc_rxdesc_init(struct gtmpsc_softc *sc)
1077 {
1078 	gtmpsc_pollrx_t *vrxp, *prxp, *first_prxp;
1079 	sdma_desc_t *dp;
1080 	int i;
1081 
1082 	first_prxp = prxp =
1083 	    (gtmpsc_pollrx_t *)sc->sc_rxdma_map->dm_segs->ds_addr;
1084 	vrxp = sc->sc_poll_sdmapage->rx;
1085 	for (i = 0; i < GTMPSC_NRXDESC; i++) {
1086 		dp = &vrxp->rxdesc;
1087 		dp->sdma_csr =
1088 		    SDMA_CSR_RX_L|SDMA_CSR_RX_F|SDMA_CSR_RX_OWN|SDMA_CSR_RX_EI;
1089 		dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1090 		dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1091 		vrxp++;
1092 		prxp++;
1093 		dp->sdma_next = (uint32_t)&prxp->rxdesc;
1094 
1095 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1096 		    i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1097 		    sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1098 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1099 		    i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1100 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1101 	}
1102 	dp = &vrxp->rxdesc;
1103 	dp->sdma_csr =
1104 	    SDMA_CSR_RX_L | SDMA_CSR_RX_F | SDMA_CSR_RX_OWN | SDMA_CSR_RX_EI;
1105 	dp->sdma_cnt = GTMPSC_RXBUFSZ << SDMA_RX_CNT_BUFSZ_SHIFT;
1106 	dp->sdma_bufp = (uint32_t)&prxp->rxbuf;
1107 	dp->sdma_next = (uint32_t)&first_prxp->rxdesc;
1108 
1109 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1110 	    i * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
1111 	    sizeof(vrxp->rxbuf), BUS_DMASYNC_PREREAD);
1112 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
1113 	    i * sizeof(gtmpsc_pollrx_t), sizeof(sdma_desc_t),
1114 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1115 
1116 	sc->sc_rcvcnt = 0;
1117 	sc->sc_roffset = 0;
1118 	sc->sc_rcvrx = 0;
1119 	sc->sc_rcvdrx = 0;
1120 }
1121 
1122 /*
1123  * gtmpsc_txdesc_init - set up TX descriptor ring
1124  */
1125 STATIC void
gtmpsc_txdesc_init(struct gtmpsc_softc * sc)1126 gtmpsc_txdesc_init(struct gtmpsc_softc *sc)
1127 {
1128 	gtmpsc_polltx_t *vtxp, *ptxp, *first_ptxp;
1129 	sdma_desc_t *dp;
1130 	int i;
1131 
1132 	first_ptxp = ptxp =
1133 	    (gtmpsc_polltx_t *)sc->sc_txdma_map->dm_segs->ds_addr;
1134 	vtxp = sc->sc_poll_sdmapage->tx;
1135 	for (i = 0; i < GTMPSC_NTXDESC; i++) {
1136 		dp = &vtxp->txdesc;
1137 		dp->sdma_csr = 0;
1138 		dp->sdma_cnt = 0;
1139 		dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1140 		vtxp++;
1141 		ptxp++;
1142 		dp->sdma_next = (uint32_t)&ptxp->txdesc;
1143 	}
1144 	dp = &vtxp->txdesc;
1145 	dp->sdma_csr = 0;
1146 	dp->sdma_cnt = 0;
1147 	dp->sdma_bufp = (uint32_t)&ptxp->txbuf;
1148 	dp->sdma_next = (uint32_t)&first_ptxp->txdesc;
1149 
1150 	sc->sc_nexttx = 0;
1151 	sc->sc_lasttx = 0;
1152 }
1153 
1154 STATIC void
gtmpscinit_stop(struct gtmpsc_softc * sc)1155 gtmpscinit_stop(struct gtmpsc_softc *sc)
1156 {
1157 	uint32_t csr;
1158 	int timo = 10000;	/* XXXX */
1159 
1160 	/* Abort MPSC Rx (aborting Tx messes things up) */
1161 	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_RXABORT);
1162 
1163 	/* abort SDMA RX and stop TX for MPSC unit */
1164 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR | SDMA_SDCM_STD);
1165 
1166 	/* poll for SDMA RX abort completion */
1167 	for (; timo > 0; timo--) {
1168 		csr = GT_SDMA_READ(sc, SDMA_SDCM);
1169 		if (!(csr & (SDMA_SDCM_AR | SDMA_SDCM_AT)))
1170 			break;
1171 		DELAY(50);
1172 	}
1173 }
1174 
/*
 * gtmpscinit_start - program descriptor pointers and mode registers, then
 * put the MPSC receiver into hunt mode.  Counterpart of gtmpscinit_stop().
 */
STATIC void
gtmpscinit_start(struct gtmpsc_softc *sc)
{

	/*
	 * Set pointers of current/first descriptor of TX to SDMA register.
	 */
	GT_SDMA_WRITE(sc, SDMA_SCTDP, sc->sc_txdma_map->dm_segs->ds_addr);
	GT_SDMA_WRITE(sc, SDMA_SFTDP, sc->sc_txdma_map->dm_segs->ds_addr);

	/*
	 * Set pointer of current descriptor of RX to SDMA register.
	 * (The original comment said "TX", but SCRDP is loaded from the
	 * RX DMA map.)
	 */
	GT_SDMA_WRITE(sc, SDMA_SCRDP, sc->sc_rxdma_map->dm_segs->ds_addr);

	/*
	 * initialize SDMA unit Configuration Register
	 */
	GT_SDMA_WRITE(sc, SDMA_SDC,
	    SDMA_SDC_BSZ_8x64 | SDMA_SDC_SFM|SDMA_SDC_RFT);

	/* Program baud rate generator and protocol config from the softc. */
	gtmpsc_loadchannelregs(sc);

	/*
	 * set MPSC LO and HI port config registers for GTMPSC unit
 	 */
	GT_MPSC_WRITE(sc, GTMPSC_MMCR_LO,
	    GTMPSC_MMCR_LO_MODE_UART	|
	    GTMPSC_MMCR_LO_ET		|
	    GTMPSC_MMCR_LO_ER		|
	    GTMPSC_MMCR_LO_NLM);
	GT_MPSC_WRITE(sc, GTMPSC_MMCR_HI,
	    GTMPSC_MMCR_HI_TCDV_DEFAULT	|
	    GTMPSC_MMCR_HI_RDW		|
	    GTMPSC_MMCR_HI_RCDV_DEFAULT);

	/*
	 * tell MPSC receive the Enter Hunt
	 */
	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_EH);
}
1216 
1217 STATIC void
gtmpscshutdown(struct gtmpsc_softc * sc)1218 gtmpscshutdown(struct gtmpsc_softc *sc)
1219 {
1220 	struct tty *tp;
1221 
1222 #ifdef KGDB
1223 	if (sc->sc_flags & GTMPSCF_KGDB != 0)
1224 		return;
1225 #endif
1226 	tp = sc->sc_tty;
1227 	mutex_spin_enter(&sc->sc_lock);
1228 	/* Fake carrier off */
1229 	(void) (*tp->t_linesw->l_modem)(tp, 0);
1230 	sdma_imask &= ~SDMA_INTR_RXBUF(sc->sc_unit);
1231 	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
1232 	mutex_spin_exit(&sc->sc_lock);
1233 }
1234 
/*
 * gtmpsc_loadchannelregs - program baud-rate generator, max-idle and
 * protocol configuration registers from the softc (sc_brg, sc_baudrate,
 * sc_cflag).
 */
STATIC void
gtmpsc_loadchannelregs(struct gtmpsc_softc *sc)
{

	/*
	 * sc_dev is NULL for the early console softc (gtmpsc_hackinit
	 * zeroes the whole softc); in that case the parent device is not
	 * available and the BRG cannot be reprogrammed through it.
	 */
	if (sc->sc_dev != NULL)
		gt_brg_bcr(device_parent(sc->sc_dev), sc->sc_brg,
	    	    GT_MPSC_CLOCK_SOURCE | compute_cdv(sc->sc_baudrate));
	/* CHR3 gets the baudrate-dependent maximum-idle value. */
	GT_MPSC_WRITE(sc, GTMPSC_CHRN(3), GTMPSC_MAXIDLE(sc->sc_baudrate));

	/*
	 * set MPSC Protocol configuration register for GTMPSC unit
	 */
	GT_MPSC_WRITE(sc, GTMPSC_MPCR, cflag2mpcr(sc->sc_cflag));
}
1249 
1250 
1251 #ifdef MPSC_CONSOLE
1252 /*
1253  * Following are all routines needed for MPSC to act as console
1254  */
/* Console getc hook: polled read from the console unit's softc. */
STATIC int
gtmpsccngetc(dev_t dev)
{

	return gtmpsc_common_getc(&gtmpsc_cn_softc);
}
1261 
/* Console putc hook: polled write through the console unit's softc. */
STATIC void
gtmpsccnputc(dev_t dev, int c)
{

	gtmpsc_common_putc(&gtmpsc_cn_softc, c);
}
1268 
/* Console pollc hook: nothing to switch, polling is the only mode here. */
STATIC void
gtmpsccnpollc(dev_t dev, int on)
{
}
1273 
/*
 * gtmpsccnhalt - console halt hook: drain pending TX, then stop the
 * MPSC receiver and abort its SDMA RX so the hardware is quiet.
 */
STATIC void
gtmpsccnhalt(dev_t dev)
{
	gtmpsc_softc_t *sc = &gtmpsc_cn_softc;
	uint32_t csr;

	/*
	 * flush TX buffers
	 */
	gtmpsc_txflush(sc);

	/*
	 * stop MPSC unit RX: clear Enter Hunt, set RX abort
	 */
	csr = GT_MPSC_READ(sc, GTMPSC_CHRN(2));
	csr &= ~GTMPSC_CHR2_EH;
	csr |= GTMPSC_CHR2_RXABORT;
	GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), csr);

	DELAY(GTMPSC_RESET_DELAY);

	/*
	 * abort SDMA RX for MPSC unit
	 */
	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR);
}
1300 
1301 int
gtmpsccnattach(bus_space_tag_t iot,bus_dma_tag_t dmat,bus_addr_t base,int unit,int brg,int speed,tcflag_t tcflag)1302 gtmpsccnattach(bus_space_tag_t iot, bus_dma_tag_t dmat, bus_addr_t base,
1303 	       int unit, int brg, int speed, tcflag_t tcflag)
1304 {
1305 	struct gtmpsc_softc *sc = &gtmpsc_cn_softc;
1306 	int i, res;
1307 	const unsigned char cp[] = "\r\nMPSC Lives!\r\n";
1308 
1309 	res = gtmpsc_hackinit(sc, iot, dmat, base, unit, brg, speed, tcflag);
1310 	if (res != 0)
1311 		return res;
1312 
1313 	gtmpscinit_stop(sc);
1314 	gtmpscinit_start(sc);
1315 
1316 	/*
1317 	 * enable SDMA receive
1318 	 */
1319 	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_ERD);
1320 
1321 	for (i = 0; i < sizeof(cp); i++) {
1322 		if (*(cp + i) == 0)
1323 			break;
1324 		gtmpsc_common_putc(sc, *(cp + i));
1325 	}
1326 
1327 	cn_tab = &gtmpsc_consdev;
1328 	cn_init_magic(&gtmpsc_cnm_state);
1329 
1330 	return 0;
1331 }
1332 
1333 /*
1334  * gtmpsc_hackinit - hacks required to support GTMPSC console
1335  */
STATIC int
gtmpsc_hackinit(struct gtmpsc_softc *sc, bus_space_tag_t iot,
		bus_dma_tag_t dmat, bus_addr_t base, int unit, int brg,
		int baudrate, tcflag_t tcflag)
{
	/*
	 * The console comes up before normal attachment, so the DMA
	 * descriptors and buffers live in the statically allocated
	 * gtmpsc_cn_dmapage rather than dynamically allocated memory.
	 */
	gtmpsc_poll_sdma_t *cn_dmapage =
	    (gtmpsc_poll_sdma_t *)gtmpsc_cn_dmapage;
	int error;

	DPRINTF(("hackinit\n"));

	/* Start from a zeroed softc; note this leaves sc_dev == NULL. */
	memset(sc, 0, sizeof(struct gtmpsc_softc));
	/* Map the MPSC and SDMA register windows for this unit. */
	error = bus_space_map(iot, base + GTMPSC_BASE(unit), GTMPSC_SIZE, 0,
	    &sc->sc_mpsch);
	if (error != 0)
		goto fail0;

	error = bus_space_map(iot, base + GTSDMA_BASE(unit), GTSDMA_SIZE, 0,
	    &sc->sc_sdmah);
	if (error != 0)
		goto fail1;
	/* Create and load single-segment DMA maps for the TX and RX rings. */
	error = bus_dmamap_create(dmat, sizeof(gtmpsc_polltx_t), 1,
	   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT, &sc->sc_txdma_map);
	if (error != 0)
		goto fail2;
	error = bus_dmamap_load(dmat, sc->sc_txdma_map, cn_dmapage->tx,
	    sizeof(gtmpsc_polltx_t), NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
	if (error != 0)
		goto fail3;
	error = bus_dmamap_create(dmat, sizeof(gtmpsc_pollrx_t), 1,
	   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
	   &sc->sc_rxdma_map);
	if (error != 0)
		goto fail4;
	error = bus_dmamap_load(dmat, sc->sc_rxdma_map, cn_dmapage->rx,
	    sizeof(gtmpsc_pollrx_t), NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
	if (error != 0)
		goto fail5;

	/* Record configuration in the softc for later register loads. */
	sc->sc_iot = iot;
	sc->sc_dmat = dmat;
	sc->sc_poll_sdmapage = cn_dmapage;
	sc->sc_brg = brg;
	sc->sc_baudrate = baudrate;
	sc->sc_cflag = tcflag;

	gtmpsc_txdesc_init(sc);
	gtmpsc_rxdesc_init(sc);

	return 0;

	/* Error unwind: release resources in reverse acquisition order. */
fail5:
	bus_dmamap_destroy(dmat, sc->sc_rxdma_map);
fail4:
	bus_dmamap_unload(dmat, sc->sc_txdma_map);
fail3:
	bus_dmamap_destroy(dmat, sc->sc_txdma_map);
fail2:
	bus_space_unmap(iot, sc->sc_sdmah, GTSDMA_SIZE);
fail1:
	bus_space_unmap(iot, sc->sc_mpsch, GTMPSC_SIZE);
fail0:
	return error;
}
1402 #endif	/* MPSC_CONSOLE */
1403 
1404 #ifdef KGDB
/* KGDB getc hook: arg is the gtmpsc softc registered with kgdb. */
STATIC int
gtmpsc_kgdb_getc(void *arg)
{
	struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;

	return gtmpsc_common_getc(sc);
}
1412 
1413 STATIC void
gtmpsc_kgdb_putc(void * arg,int c)1414 gtmpsc_kgdb_putc(void *arg, int c)
1415 {
1416 	struct gtmpsc_softc *sc = (struct gtmpsc_softc *)arg;
1417 
1418 	return gtmpsc_common_putc(sc, c);
1419 }
1420 #endif /* KGDB */
1421 
1422 #if defined(MPSC_CONSOLE) || defined(KGDB)
1423 /*
1424  * gtmpsc_common_getc - polled console read
1425  *
1426  *	We copy data from the DMA buffers into a buffer in the softc
1427  *	to reduce descriptor ownership turnaround time
1428  *	MPSC can crater if it wraps descriptor rings,
1429  *	which is asynchronous and throttled only by line speed.
1430  */
STATIC int
gtmpsc_common_getc(struct gtmpsc_softc *sc)
{
	gtmpsc_pollrx_t *vrxp;
	uint32_t csr;
	int ix, ch, wdog_interval = 0;

	/* "cold" marks early-boot context where spin mutexes are unusable. */
	if (!cold)
		mutex_spin_enter(&sc->sc_lock);

	/* sc_rcvdrx indexes the next descriptor to examine for data. */
	ix = sc->sc_rcvdrx;
	vrxp = &sc->sc_poll_sdmapage->rx[ix];
	while (sc->sc_rcvcnt == 0) {
		/* Wait receive */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
		    ix * sizeof(gtmpsc_pollrx_t),
		    sizeof(sdma_desc_t),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		csr = vrxp->rxdesc.sdma_csr;
		if (csr & SDMA_CSR_RX_OWN) {
			/*
			 * Descriptor still owned by the SDMA engine: no
			 * data yet.  Re-issue Enter Hunt + CRD, keep the
			 * watchdog alive, and retry after a short delay.
			 */
			GT_MPSC_WRITE(sc, GTMPSC_CHRN(2),
			    GTMPSC_CHR2_EH | GTMPSC_CHR2_CRD);
			/*
			 * NOTE(review): this services the watchdog on all
			 * but every 32nd pass; the inverted test may have
			 * been intended -- preserved as-is.
			 */
			if (wdog_interval++ % 32)
				gt_watchdog_service();
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
			    ix * sizeof(gtmpsc_pollrx_t),
			    sizeof(sdma_desc_t),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			DELAY(50);
			continue;
		}
		if (csr & SDMA_CSR_RX_ES)
			aprint_error_dev(sc->sc_dev,
			    "RX error, rxdesc csr 0x%x\n", csr);

		/* Pull the received bytes into CPU view before reading. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
		    sizeof(vrxp->rxbuf),
		    BUS_DMASYNC_POSTREAD);

		/* Keep only the byte-count field of sdma_cnt. */
		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
		sc->sc_rcvcnt = vrxp->rxdesc.sdma_cnt;
		sc->sc_roffset = 0;
		sc->sc_rcvdrx = (ix + 1) % GTMPSC_NRXDESC;

		if (sc->sc_rcvcnt == 0) {
			/* cleanup this descriptor, and return to DMA */
			CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
			sc->sc_rcvrx = sc->sc_rcvdrx;
		}

		/* Advance to the (possibly new) current descriptor. */
		ix = sc->sc_rcvdrx;
		vrxp = &sc->sc_poll_sdmapage->rx[ix];
	}
	/* Consume one byte from the current descriptor's buffer. */
	ch = vrxp->rxbuf[sc->sc_roffset++];
	sc->sc_rcvcnt--;

	if (sc->sc_roffset == vrxp->rxdesc.sdma_cnt) {
		/* cleanup this descriptor, and return to DMA */
		CLEANUP_AND_RETURN_RXDMA(sc, ix);
		sc->sc_rcvrx = (ix + 1) % GTMPSC_NRXDESC;
	}

	gt_watchdog_service();

	if (!cold)
		mutex_spin_exit(&sc->sc_lock);
	return ch;
}
1500 
/*
 * gtmpsc_common_putc - polled transmit of one character: queue it on the
 * TX descriptor ring, kick the SDMA engine, then spin until every queued
 * descriptor has completed.
 */
STATIC void
gtmpsc_common_putc(struct gtmpsc_softc *sc, int c)
{
	gtmpsc_polltx_t *vtxp;
	int ix;
	const int nc = 1;	/* exactly one byte per descriptor */

	/* Get a DMA descriptor */
	if (!cold)
		mutex_spin_enter(&sc->sc_lock);
	ix = sc->sc_nexttx;
	sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
	if (sc->sc_nexttx == sc->sc_lasttx) {
		/* Ring full: wait for the oldest descriptor to complete. */
		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
	}
	if (!cold)
		mutex_spin_exit(&sc->sc_lock);

	/* Fill the buffer and flush it out before handing it to the HW. */
	vtxp = &sc->sc_poll_sdmapage->tx[ix];
	vtxp->txbuf[0] = c;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
	    sizeof(vtxp->txbuf),
	    BUS_DMASYNC_PREWRITE);

	/* Set count, then give ownership to the SDMA engine. */
	vtxp->txdesc.sdma_cnt = (nc << SDMA_TX_CNT_BCNT_SHIFT) | nc;
	vtxp->txdesc.sdma_csr = SDMA_CSR_TX_L | SDMA_CSR_TX_F | SDMA_CSR_TX_OWN;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
	    ix * sizeof(gtmpsc_polltx_t),
	    sizeof(sdma_desc_t),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (!cold)
		mutex_spin_enter(&sc->sc_lock);
	/*
	 * now kick some SDMA
	 */
	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_TXD);

	/* Drain: wait for every outstanding descriptor to complete. */
	while (sc->sc_lasttx != sc->sc_nexttx) {
		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
	}
	if (!cold)
		mutex_spin_exit(&sc->sc_lock);
}
1548 
1549 /*
1550  * gtmpsc_common_putc - polled console putc
1551  */
STATIC void
gtmpsc_common_putc_wait_complete(struct gtmpsc_softc *sc, int ix)
{
	gtmpsc_polltx_t *vtxp = &sc->sc_poll_sdmapage->tx[ix];
	uint32_t csr;
	int wdog_interval = 0;

	/* Fetch the descriptor's current state from DMA memory. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
	    ix * sizeof(gtmpsc_polltx_t),
	    sizeof(sdma_desc_t),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	csr = vtxp->txdesc.sdma_csr;
	/* Spin until the OWN bit clears (device done with descriptor). */
	while (csr & SDMA_CSR_TX_OWN) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
		    ix * sizeof(gtmpsc_polltx_t),
		    sizeof(sdma_desc_t),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		DELAY(40);
		/*
		 * NOTE(review): like gtmpsc_common_getc, this skips the
		 * watchdog only on every 32nd pass -- preserved as-is.
		 */
		if (wdog_interval++ % 32)
			gt_watchdog_service();
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
		    ix * sizeof(gtmpsc_polltx_t),
		    sizeof(sdma_desc_t),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		csr = vtxp->txdesc.sdma_csr;
	}
	if (csr & SDMA_CSR_TX_ES)
		aprint_error_dev(sc->sc_dev,
		    "TX error, txdesc(%d) csr 0x%x\n", ix, csr);
	/* Reclaim the transmit buffer for CPU use. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
	    sizeof(vtxp->txbuf),
	    BUS_DMASYNC_POSTWRITE);
}
1586 #endif	/* defined(MPSC_CONSOLE) || defined(KGDB) */
1587