/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)if_uba.c	7.15 (Berkeley) 06/28/90
 */

#include "param.h"
#include "systm.h"
#include "malloc.h"
#include "mbuf.h"
#include "map.h"
#include "buf.h"
#include "cmap.h"
#include "vmmac.h"
#include "socket.h"
#include "syslog.h"

#include "../net/if.h"

#include "../vax/pte.h"
#include "../vax/mtpr.h"
#include "if_uba.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"

/*
 * Routines supporting UNIBUS network interfaces.
 *
 * TODO:
 *	Support interfaces using only one BDP statically.
 */
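
/*
 * A driver typically uses these routines in three steps: if_ubaminit once
 * from its initialization routine to allocate i/o buffers and UNIBUS map
 * registers, if_ubaput to map or copy an outgoing mbuf chain into a
 * transmit buffer, and if_ubaget to build an mbuf chain from a buffer
 * after input completes.
 */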

/*
 * Initialize UNIBUS resources for an interface on uba number uban, whose
 * headers of size hlen are to end on a page boundary.  We allocate one
 * UNIBUS map register for the page holding the header, and nmr more UNIBUS
 * map registers for i/o on the adapter, doing this once for each read
 * buffer and once for each write buffer.  We also allocate kernel memory
 * (M_DEVBUF) to back these buffers.
 */
if_ubaminit(ifu, uban, hlen, nmr, ifr, nr, ifw, nw)
        register struct ifubinfo *ifu;
        int uban, hlen, nmr, nr, nw;
        register struct ifrw *ifr;
        register struct ifxmt *ifw;
{
        register caddr_t p;
        caddr_t cp;
        int i, nclbytes, off;

        if (hlen)
                off = MCLBYTES - hlen;
        else
                off = 0;
        nclbytes = roundup(nmr * NBPG, MCLBYTES);
        if (hlen)
                nclbytes += MCLBYTES;
        if (ifr[0].ifrw_addr)
                cp = ifr[0].ifrw_addr - off;
        else {
                cp = (caddr_t)malloc((u_long)((nr + nw) * nclbytes), M_DEVBUF,
                    M_NOWAIT);
                if (cp == 0)
                        return (0);
                p = cp;
                for (i = 0; i < nr; i++) {
                        ifr[i].ifrw_addr = p + off;
                        p += nclbytes;
                }
                for (i = 0; i < nw; i++) {
                        ifw[i].ifw_base = p;
                        ifw[i].ifw_addr = p + off;
                        p += nclbytes;
                }
                ifu->iff_hlen = hlen;
                ifu->iff_uban = uban;
                ifu->iff_uba = uba_hd[uban].uh_uba;
                ifu->iff_ubamr = uba_hd[uban].uh_mr;
        }
        for (i = 0; i < nr; i++)
                if (if_ubaalloc(ifu, &ifr[i], nmr) == 0) {
                        nr = i;
                        nw = 0;
                        goto bad;
                }
        for (i = 0; i < nw; i++)
                if (if_ubaalloc(ifu, &ifw[i].ifrw, nmr) == 0) {
                        nw = i;
                        goto bad;
                }
        while (--nw >= 0) {
                for (i = 0; i < nmr; i++)
                        ifw[nw].ifw_wmap[i] = ifw[nw].ifw_mr[i];
                ifw[nw].ifw_xswapd = 0;
                ifw[nw].ifw_flags = IFRW_W;
                ifw[nw].ifw_nmr = nmr;
        }
        return (1);
bad:
        while (--nw >= 0)
                ubarelse(ifu->iff_uban, &ifw[nw].ifw_info);
        while (--nr >= 0)
                ubarelse(ifu->iff_uban, &ifr[nr].ifrw_info);
        free(cp, M_DEVBUF);
        ifr[0].ifrw_addr = 0;
        return (0);
}
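
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * driver "xx" keeping NXXR receive and NXXW transmit buffers might call
 * if_ubaminit from its initialization routine roughly as below.  The softc
 * members (xx_if, xx_ifu, xx_ifr, xx_ifw), NXXR/NXXW and the ETHERMTU-based
 * sizing are assumptions made for the example only.
 *
 *	struct xx_softc {
 *		struct	ifnet xx_if;
 *		struct	ifubinfo xx_ifu;
 *		struct	ifrw xx_ifr[NXXR];
 *		struct	ifxmt xx_ifw[NXXW];
 *	};
 *
 *	if (if_ubaminit(&sc->xx_ifu, ui->ui_ubanum,
 *	    sizeof (struct ether_header), (int)btoc(ETHERMTU),
 *	    sc->xx_ifr, NXXR, sc->xx_ifw, NXXW) == 0)
 *		printf("xx%d: can't allocate UNIBUS resources\n", unit);
 */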

/*
 * Set up an ifrw structure by allocating UNIBUS map registers,
 * possibly a buffered data path, and initializing the fields of
 * the ifrw structure to minimize run-time overhead.
 */
static
if_ubaalloc(ifu, ifrw, nmr)
        struct ifubinfo *ifu;
        register struct ifrw *ifrw;
        int nmr;
{
        register int info;

        info =
            uballoc(ifu->iff_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->iff_hlen,
                ifu->iff_flags);
        if (info == 0)
                return (0);
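        /*
         * Record the allocation for fast use later: the buffered data path
         * number, a prototype map-register value (valid bit plus BDP field)
         * that the copy routines below OR with a page frame number, and a
         * pointer to the first data map register, skipping the register
         * that maps the header page when a header is present.
         */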
        ifrw->ifrw_info = info;
        ifrw->ifrw_bdp = UBAI_BDP(info);
        ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
        ifrw->ifrw_mr = &ifu->iff_ubamr[UBAI_MR(info) + (ifu->iff_hlen? 1 : 0)];
        return (1);
}

/*
 * Pull read data off an interface.
 * Totlen is the length of the data, with the local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the header from the trailer and then all the normal
 * data into mbufs.  When full cluster-sized units are present
 * on the interface on cluster boundaries, we can get them more
 * easily by remapping, and take advantage of this here.
 * Save a pointer to the interface structure and the total length,
 * so that protocols can determine where incoming packets arrived.
 * Note: we may be called to receive from a transmit buffer by some
 * devices.  In that case, we must force normal mapping of the buffer,
 * so that the correct data will appear (only UNIBUS maps are
 * changed when remapping the transmit buffers).
 */
struct mbuf *
if_ubaget(ifu, ifr, totlen, off, ifp)
        struct ifubinfo *ifu;
        register struct ifrw *ifr;
        register int totlen;
        int off;
        struct ifnet *ifp;
{
        struct mbuf *top, **mp;
        register struct mbuf *m;
        register caddr_t cp = ifr->ifrw_addr + ifu->iff_hlen, pp;
        register int len;
        caddr_t epkt = cp + totlen;

        top = 0;
        mp = &top;
        /*
         * Skip the trailer header (type and trailer length).
         */
        if (off) {
                off += 2 * sizeof(u_short);
                totlen -= 2 * sizeof(u_short);
                cp += off;
        }
        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == 0)
                return ((struct mbuf *)NULL);
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = totlen;
        m->m_len = MHLEN;

        if (ifr->ifrw_flags & IFRW_W)
                rcv_xmtbuf((struct ifxmt *)ifr);

        while (totlen > 0) {
                if (top) {
                        MGET(m, M_DONTWAIT, MT_DATA);
                        if (m == 0) {
                                m_freem(top);
                                top = 0;
                                goto out;
                        }
                        m->m_len = MLEN;
                }
                len = min(totlen, epkt - cp);
                if (len >= MINCLSIZE) {
                        struct pte *cpte, *ppte;
                        int x, *ip, i;

                        MCLGET(m, M_DONTWAIT);
                        if ((m->m_flags & M_EXT) == 0)
                                goto nopage;
                        len = min(len, MCLBYTES);
                        m->m_len = len;
                        if (!claligned(cp))
                                goto copy;

                        /*
                         * Switch the pages mapped to UNIBUS with the new
                         * cluster pp as a quick form of copy: exchange the
                         * kernel PTEs, point the UNIBUS map registers at the
                         * pages now backing the buffer, and invalidate the
                         * stale translation buffer entries for both ranges.
                         */
                        pp = mtod(m, char *);
                        cpte = kvtopte(cp);
                        ppte = kvtopte(pp);
                        x = btop(cp - ifr->ifrw_addr);
                        ip = (int *)&ifr->ifrw_mr[x];
                        for (i = 0; i < MCLBYTES/NBPG; i++) {
                                struct pte t;
                                t = *ppte; *ppte++ = *cpte; *cpte = t;
                                *ip++ = cpte++->pg_pfnum|ifr->ifrw_proto;
                                mtpr(TBIS, cp);
                                cp += NBPG;
                                mtpr(TBIS, (caddr_t)pp);
                                pp += NBPG;
                        }
                        goto nocopy;
                }
nopage:
                if (len < m->m_len) {
                        /*
                         * Place initial small packet/header at end of mbuf.
                         */
                        if (top == 0 && len + max_linkhdr <= m->m_len)
                                m->m_data += max_linkhdr;
                        m->m_len = len;
                } else
                        len = m->m_len;
copy:
                bcopy(cp, mtod(m, caddr_t), (unsigned)len);
                cp += len;
nocopy:
                *mp = m;
                mp = &m->m_next;
                totlen -= len;
                if (cp == epkt)
                        cp = ifr->ifrw_addr + ifu->iff_hlen;
        }
out:
        if (ifr->ifrw_flags & IFRW_W)
                restor_xmtbuf((struct ifxmt *)ifr);
        return (top);
}
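
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * receive-interrupt routine, having taken the packet length and trailer
 * offset from its device, might call if_ubaget as below and then hand the
 * chain to the protocol input queue; sc, bix, len, off and the xx_*
 * members are assumptions made for the example only.
 *
 *	m = if_ubaget(&sc->xx_ifu, &sc->xx_ifr[bix], len, off, &sc->xx_if);
 *	if (m == 0)
 *		return;
 */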

/*
 * Change the mapping on a transmit buffer so that if_ubaget may
 * receive from that buffer.  Copy data from any pages mapped to UNIBUS
 * into the pages mapped to normal kernel virtual memory, so that
 * they can be accessed and swapped as usual.  We take advantage
 * of the fact that clusters are placed on the xtofree list
 * in inverse order, finding the last one.
 */
static
rcv_xmtbuf(ifw)
        register struct ifxmt *ifw;
{
        register struct mbuf *m;
        struct mbuf **mprev;
        register i;
        char *cp;

        while (i = ffs((long)ifw->ifw_xswapd)) {
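                /*
                 * ffs returns a 1-origin bit number; since the first cluster
                 * of the buffer holds the header, bit i-1 corresponds to the
                 * cluster at ifw_base + i * MCLBYTES, so compute the address
                 * before making i 0-origin.
                 */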
                cp = ifw->ifw_base + i * MCLBYTES;
                i--;
                ifw->ifw_xswapd &= ~(1<<i);
                mprev = &ifw->ifw_xtofree;
                for (m = ifw->ifw_xtofree; m && m->m_next; m = m->m_next)
                        mprev = &m->m_next;
                if (m == NULL)
                        break;
                bcopy(mtod(m, caddr_t), cp, MCLBYTES);
                (void) m_free(m);
                *mprev = NULL;
        }
        ifw->ifw_xswapd = 0;
        for (i = 0; i < ifw->ifw_nmr; i++)
                ifw->ifw_mr[i] = ifw->ifw_wmap[i];
}

/*
 * Put a transmit buffer back together after doing an if_ubaget on it,
 * which may have swapped pages.
 */
static
restor_xmtbuf(ifw)
        register struct ifxmt *ifw;
{
        register i;

        for (i = 0; i < ifw->ifw_nmr; i++)
                ifw->ifw_wmap[i] = ifw->ifw_mr[i];
}

/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header, which is copied into the mapped, aligned
 * i/o space.
 */
if_ubaput(ifu, ifw, m)
        struct ifubinfo *ifu;
        register struct ifxmt *ifw;
        register struct mbuf *m;
{
        register struct mbuf *mp;
        register caddr_t cp, dp;
        register int i;
        int xswapd = 0;
        int x, cc, t;

        cp = ifw->ifw_addr;
        while (m) {
                dp = mtod(m, char *);
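                /*
                 * When both the buffer position and the mbuf data are
                 * cluster aligned and the mbuf is a full cluster or the
                 * last of the chain, it is "copied" by pointing the UNIBUS
                 * map registers at its pages, and the mbuf is held on
                 * ifw_xtofree until the output completes; otherwise the
                 * data is physically copied into the i/o space.
                 */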
                if (claligned(cp) && claligned(dp) &&
                    (m->m_len == MCLBYTES || m->m_next == (struct mbuf *)0)) {
                        struct pte *pte;
                        int *ip;

                        pte = kvtopte(dp);
                        x = btop(cp - ifw->ifw_addr);
                        ip = (int *)&ifw->ifw_mr[x];
                        for (i = 0; i < MCLBYTES/NBPG; i++)
                                *ip++ = ifw->ifw_proto | pte++->pg_pfnum;
                        xswapd |= 1 << (x>>(MCLSHIFT-PGSHIFT));
                        mp = m->m_next;
                        m->m_next = ifw->ifw_xtofree;
                        ifw->ifw_xtofree = m;
                        cp += m->m_len;
                } else {
                        bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
                        cp += m->m_len;
                        MFREE(m, mp);
                }
                m = mp;
        }

        /*
         * Xswapd is the set of clusters we just mapped out.  Ifw->ifw_xswapd
         * is the set of clusters mapped out from before.  We compute
         * the number of clusters involved in this operation in x.
         * Clusters mapped out before and involved in this operation
         * should be unmapped so that the original pages will be accessed
         * by the device.
         */
        cc = cp - ifw->ifw_addr;
        x = ((cc - ifu->iff_hlen) + MCLBYTES - 1) >> MCLSHIFT;
        ifw->ifw_xswapd &= ~xswapd;
        while (i = ffs((long)ifw->ifw_xswapd)) {
                i--;
                if (i >= x)
                        break;
                ifw->ifw_xswapd &= ~(1<<i);
                i *= MCLBYTES/NBPG;
                for (t = 0; t < MCLBYTES/NBPG; t++) {
                        ifw->ifw_mr[i] = ifw->ifw_wmap[i];
                        i++;
                }
        }
        ifw->ifw_xswapd |= xswapd;
        return (cc);
}
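
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * output start routine might map a dequeued packet with if_ubaput and use
 * the returned byte count to set up the device transfer; sc, bix and the
 * xx_* members are assumptions made for the example only.
 *
 *	IF_DEQUEUE(&sc->xx_if.if_snd, m);
 *	if (m == 0)
 *		return;
 *	cc = if_ubaput(&sc->xx_ifu, &sc->xx_ifw[bix], m);
 *	...start the device on transmit buffer bix for cc bytes...
 */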
372