xref: /netbsd/sys/dev/qbus/if_uba.c (revision bf9ec67e)
1 /*	$NetBSD: if_uba.c,v 1.22 2001/11/13 07:11:24 lukem Exp $	*/
2 
3 /*
4  * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)if_uba.c	7.16 (Berkeley) 12/16/90
36  */
37 
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: if_uba.c,v 1.22 2001/11/13 07:11:24 lukem Exp $");
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/socket.h>
46 #include <sys/device.h>
47 
48 #include <net/if.h>
49 
50 #include <machine/bus.h>
51 
52 #include <dev/qbus/if_uba.h>
53 #include <dev/qbus/ubareg.h>
54 #include <dev/qbus/ubavar.h>
55 
56 static	struct mbuf *getmcl(void);
57 
58 /*
59  * Routines supporting UNIBUS network interfaces.
60  *
61  * TODO:
62  *	Support interfaces using only one BDP statically.
63  */
64 
65 /*
66  * Init UNIBUS for interface whose headers of size hlen are to
67  * end on a page boundary.  We allocate a UNIBUS map register for the page
68  * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
69  * doing this once for each read and once for each write buffer.  We also
70  * allocate page frames in the mbuffer pool for these pages.
71  *
72  * Recent changes:
73  *	No special "header pages" anymore.
74  *	Recv packets are always put in clusters.
75  *	"size" is the maximum buffer size, may not be bigger than MCLBYTES.
76  */
77 int
78 if_ubaminit(struct ifubinfo *ifu, struct uba_softc *uh, int size,
79     struct ifrw *ifr, int nr, struct ifxmt *ifw, int nw)
80 {
81 	struct mbuf *m;
82 	int totsz, i, error, rseg, nm = nr;
83 	bus_dma_segment_t seg;
84 	caddr_t vaddr;
85 
86 #ifdef DIAGNOSTIC
87 	if (size > MCLBYTES)
88 		panic("if_ubaminit: size > MCLBYTES");
89 #endif
90 	ifu->iff_softc = uh;
91 	/*
92 	 * Get DMA memory for transmit buffers.
93 	 * Buffer size are rounded up to a multiple of the uba page size,
94 	 * then allocated contiguous.
95 	 */
96 	size = (size + UBA_PGOFSET) & ~UBA_PGOFSET;
97 	totsz = size * nw;
98 	if ((error = bus_dmamem_alloc(uh->uh_dmat, totsz, NBPG, 0,
99 	    &seg, 1, &rseg, BUS_DMA_NOWAIT)))
100 		return error;
101 	if ((error = bus_dmamem_map(uh->uh_dmat, &seg, rseg, totsz, &vaddr,
102 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT))) {
103 		bus_dmamem_free(uh->uh_dmat, &seg, rseg);
104 		return error;
105 	}
106 
107 	/*
108 	 * Create receive and transmit maps.
109 	 * Alloc all resources now so we won't fail in the future.
110 	 */
111 
112 	for (i = 0; i < nr; i++) {
113 		if ((error = bus_dmamap_create(uh->uh_dmat, size, 1,
114 		    size, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
115 		    &ifr[i].ifrw_map))) {
116 			nr = i;
117 			nm = nw = 0;
118 			goto bad;
119 		}
120 	}
121 	for (i = 0; i < nw; i++) {
122 		if ((error = bus_dmamap_create(uh->uh_dmat, size, 1,
123 		    size, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
124 		    &ifw[i].ifw_map))) {
125 			nw = i;
126 			nm = 0;
127 			goto bad;
128 		}
129 	}
130 	/*
131 	 * Preload the rx maps with mbuf clusters.
132 	 */
133 	for (i = 0; i < nm; i++) {
134 		if ((m = getmcl()) == NULL) {
135 			nm = i;
136 			goto bad;
137 		}
138 		ifr[i].ifrw_mbuf = m;
139 		bus_dmamap_load(uh->uh_dmat, ifr[i].ifrw_map,
140 		    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
141 
142 	}
143 	/*
144 	 * Load the tx maps with DMA memory (common case).
145 	 */
146 	for (i = 0; i < nw; i++) {
147 		ifw[i].ifw_vaddr = vaddr + size * i;
148 		ifw[i].ifw_size = size;
149 		bus_dmamap_load(uh->uh_dmat, ifw[i].ifw_map,
150 		    ifw[i].ifw_vaddr, ifw[i].ifw_size, NULL, BUS_DMA_NOWAIT);
151 	}
152 	return 0;
153 bad:
154 	while (--nm >= 0) {
155 		bus_dmamap_unload(uh->uh_dmat, ifr[nw].ifrw_map);
156 		m_freem(ifr[nm].ifrw_mbuf);
157 	}
158 	while (--nw >= 0)
159 		bus_dmamap_destroy(uh->uh_dmat, ifw[nw].ifw_map);
160 	while (--nr >= 0)
161 		bus_dmamap_destroy(uh->uh_dmat, ifr[nw].ifrw_map);
162 	return (0);
163 }
164 
165 struct mbuf *
166 getmcl()
167 {
168 	struct mbuf *m;
169 
170 	MGETHDR(m, M_DONTWAIT, MT_DATA);
171 	if (m == NULL)
172 		return 0;
173 	MCLGET(m, M_DONTWAIT);
174 	if ((m->m_flags & M_EXT) == 0) {
175 		m_freem(m);
176 		return 0;
177 	}
178 	return m;
179 }
180 
181 /*
182  * Pull read data off a interface.
183  * Totlen is length of data, with local net header stripped.
184  * When full cluster sized units are present
185  * on the interface on cluster boundaries we can get them more
186  * easily by remapping, and take advantage of this here.
187  * Save a pointer to the interface structure and the total length,
188  * so that protocols can determine where incoming packets arrived.
189  * Note: we may be called to receive from a transmit buffer by some
190  * devices.  In that case, we must force normal mapping of the buffer,
191  * so that the correct data will appear (only unibus maps are
192  * changed when remapping the transmit buffers).
193  */
194 struct mbuf *
195 if_ubaget(struct ifubinfo *ifu, struct ifrw *ifr, struct ifnet *ifp, int len)
196 {
197 	struct uba_softc *uh = ifu->iff_softc;
198 	struct mbuf *m, *mn;
199 
200 	if ((mn = getmcl()) == NULL)
201 		return NULL;	/* Leave the old */
202 
203 	bus_dmamap_unload(uh->uh_dmat, ifr->ifrw_map);
204 	m = ifr->ifrw_mbuf;
205 	ifr->ifrw_mbuf = mn;
206 	if ((bus_dmamap_load(uh->uh_dmat, ifr->ifrw_map,
207 	    mn->m_ext.ext_buf, mn->m_ext.ext_size, NULL, BUS_DMA_NOWAIT)))
208 		panic("if_ubaget"); /* Cannot happen */
209 	m->m_pkthdr.rcvif = ifp;
210 	m->m_len = m->m_pkthdr.len = len;
211 	return m;
212 }
213 
214 /*
215  * Called after a packet is sent. Releases hold resources.
216  */
217 void
218 if_ubaend(struct ifubinfo *ifu, struct ifxmt *ifw)
219 {
220 	struct uba_softc *uh = ifu->iff_softc;
221 
222 	if (ifw->ifw_flags & IFRW_MBUF) {
223 		bus_dmamap_unload(uh->uh_dmat, ifw->ifw_map);
224 		m_freem(ifw->ifw_mbuf);
225 		ifw->ifw_mbuf = NULL;
226 	}
227 }
228 
/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 *
 * Returns the byte length of the data handed to the device.
 * Ownership of the mbuf chain passes to this function: it is
 * either held in ifw->ifw_mbuf (direct path) or freed after
 * copying (copy path).
 */
int
if_ubaput(struct ifubinfo *ifu, struct ifxmt *ifw, struct mbuf *m)
{
	struct uba_softc *uh = ifu->iff_softc;
	int len;

	/*
	 * NOTE(review): this condition is deliberately constant-false;
	 * the original test (m->m_next == 0) survives only in the
	 * comment, so the direct-mapping fast path below is disabled
	 * and every packet takes the copy path.  Presumably left this
	 * way on purpose -- confirm before re-enabling.
	 */
	if (/* m->m_next ==*/ 0) {
		/*
		 * Map the outgoing packet directly.
		 */
		if ((ifw->ifw_flags & IFRW_MBUF) == 0) {
			/* Slot currently maps the DMA buffer; drop that
			 * mapping and mark the slot as mbuf-backed. */
			bus_dmamap_unload(uh->uh_dmat, ifw->ifw_map);
			ifw->ifw_flags |= IFRW_MBUF;
		}
		bus_dmamap_load(uh->uh_dmat, ifw->ifw_map, mtod(m, void *),
		    m->m_len, NULL, BUS_DMA_NOWAIT);
		/* Chain is held until if_ubaend() releases it. */
		ifw->ifw_mbuf = m;
		len = m->m_len;
	} else {
		if (ifw->ifw_flags & IFRW_MBUF) {
			/* Slot was mbuf-backed last time; restore the
			 * mapping of the preloaded DMA transmit buffer. */
			bus_dmamap_load(uh->uh_dmat, ifw->ifw_map,
	                    ifw->ifw_vaddr, ifw->ifw_size,NULL,BUS_DMA_NOWAIT);
			ifw->ifw_flags &= ~IFRW_MBUF;
		}
		/* Flatten the whole chain into the DMA buffer and free it. */
		len = m->m_pkthdr.len;
		m_copydata(m, 0, m->m_pkthdr.len, ifw->ifw_vaddr);
		m_freem(m);
	}
	return len;
}
266