xref: /original-bsd/sys/vax/if/if_uba.c (revision 81f57ac7)
1 /*
2  * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms are permitted
6  * provided that the above copyright notice and this paragraph are
7  * duplicated in all such forms and that any documentation,
8  * advertising materials, and other materials related to such
9  * distribution and use acknowledge that the software was developed
10  * by the University of California, Berkeley.  The name of the
11  * University may not be used to endorse or promote products derived
12  * from this software without specific prior written permission.
13  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16  *
17  *	@(#)if_uba.c	7.11 (Berkeley) 08/27/88
18  */
19 
20 #include "param.h"
21 #include "systm.h"
22 #include "malloc.h"
23 #include "mbuf.h"
24 #include "map.h"
25 #include "buf.h"
26 #include "cmap.h"
27 #include "vmmac.h"
28 #include "socket.h"
29 #include "syslog.h"
30 
31 #include "../net/if.h"
32 
33 #include "../vax/pte.h"
34 #include "../vax/mtpr.h"
35 #include "if_uba.h"
36 #include "../vaxuba/ubareg.h"
37 #include "../vaxuba/ubavar.h"
38 
39 /*
40  * Routines supporting UNIBUS network interfaces.
41  *
42  * TODO:
43  *	Support interfaces using only one BDP statically.
44  */
45 
/*
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this once for each read and once for each write buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 *
 * Returns 1 on success, 0 if the arena malloc or any UNIBUS resource
 * allocation fails (all partial allocations are released first).
 */
if_ubaminit(ifu, uban, hlen, nmr, ifr, nr, ifw, nw)
	register struct ifubinfo *ifu;
	int uban, hlen, nmr, nr, nw;
	register struct ifrw *ifr;
	register struct ifxmt *ifw;
{
	register caddr_t p;
	caddr_t cp;
	int i, nclbytes, off;

	/*
	 * When a local-net header is used it is placed at the tail of an
	 * extra leading cluster, so that the packet data proper begins
	 * exactly on a cluster (and hence page) boundary.
	 */
	if (hlen)
		off = MCLBYTES - hlen;
	else
		off = 0;
	nclbytes = roundup(nmr * NBPG, MCLBYTES);
	if (hlen)
		nclbytes += MCLBYTES;
	if (ifr[0].ifrw_addr)
		/* Re-initialization: recover the arena base from buffer 0. */
		cp = ifr[0].ifrw_addr - off;
	else {
		/* One contiguous arena holds all nr read and nw write buffers. */
		cp = (caddr_t)malloc((u_long)((nr + nw) * nclbytes), M_DEVBUF,
		    M_NOWAIT);
		if (cp == 0)
			return (0);
		p = cp;
		for (i = 0; i < nr; i++) {
			ifr[i].ifrw_addr = p + off;
			p += nclbytes;
		}
		for (i = 0; i < nw; i++) {
			ifw[i].ifw_base = p;
			ifw[i].ifw_addr = p + off;
			p += nclbytes;
		}
		ifu->iff_hlen = hlen;
		ifu->iff_uban = uban;
		ifu->iff_uba = uba_hd[uban].uh_uba;
		ifu->iff_ubamr = uba_hd[uban].uh_mr;
	}
	/* Map each buffer onto the UNIBUS; on failure remember how far we got
	 * so the error path below releases exactly the successful ones. */
	for (i = 0; i < nr; i++)
		if (if_ubaalloc(ifu, &ifr[i], nmr) == 0) {
			nr = i;
			nw = 0;
			goto bad;
		}
	for (i = 0; i < nw; i++)
		if (if_ubaalloc(ifu, &ifw[i].ifrw, nmr) == 0) {
			nw = i;
			goto bad;
		}
	/* Save each write buffer's normal mapping so it can be restored
	 * after clusters are mapped out by if_ubaput(). */
	while (--nw >= 0) {
		for (i = 0; i < nmr; i++)
			ifw[nw].ifw_wmap[i] = ifw[nw].ifw_mr[i];
		ifw[nw].ifw_xswapd = 0;
		ifw[nw].ifw_flags = IFRW_W;
		ifw[nw].ifw_nmr = nmr;
	}
	return (1);
bad:
	/* Back out the UNIBUS allocations made above, then free the arena
	 * (even on the re-init path, where cp was recovered from ifrw_addr)
	 * and zero ifrw_addr so a later call starts from scratch. */
	while (--nw >= 0)
		ubarelse(ifu->iff_uban, &ifw[nw].ifw_info);
	while (--nr >= 0)
		ubarelse(ifu->iff_uban, &ifr[nr].ifrw_info);
	free(cp, M_DEVBUF);
	ifr[0].ifrw_addr = 0;
	return (0);
}
120 
121 /*
122  * Setup an ifrw structure by allocating UNIBUS map registers,
123  * possibly a buffered data path, and initializing the fields of
124  * the ifrw structure to minimize run-time overhead.
125  */
126 static
127 if_ubaalloc(ifu, ifrw, nmr)
128 	struct ifubinfo *ifu;
129 	register struct ifrw *ifrw;
130 	int nmr;
131 {
132 	register int info;
133 
134 	info =
135 	    uballoc(ifu->iff_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->iff_hlen,
136 	        ifu->iff_flags);
137 	if (info == 0)
138 		return (0);
139 	ifrw->ifrw_info = info;
140 	ifrw->ifrw_bdp = UBAI_BDP(info);
141 	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
142 	ifrw->ifrw_mr = &ifu->iff_ubamr[UBAI_MR(info) + (ifu->iff_hlen? 1 : 0)];
143 	return (1);
144 }
145 
/*
 * Pull read data off a interface.
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 * Prepend a pointer to the interface structure,
 * so that protocols can determine where incoming packets arrived.
 * Note: we may be called to receive from a transmit buffer by some
 * devices.  In that case, we must force normal mapping of the buffer,
 * so that the correct data will appear (only unibus maps are
 * changed when remapping the transmit buffers).
 *
 * Returns the head of the mbuf chain, or 0 if mbuf allocation fails.
 */
struct mbuf *
if_ubaget(ifu, ifr, totlen, off0, ifp)
	struct ifubinfo *ifu;
	register struct ifrw *ifr;
	int totlen, off0;
	struct ifnet *ifp;
{
	struct mbuf *top, **mp;
	register struct mbuf *m;
	register caddr_t cp = ifr->ifrw_addr + ifu->iff_hlen, pp;
	register int len, off = off0;

	top = 0;
	mp = &top;
	/* Receiving from a transmit buffer: copy any mapped-out clusters
	 * back so the buffer's normal pages hold the correct data. */
	if (ifr->ifrw_flags & IFRW_W)
		rcv_xmtbuf((struct ifxmt *)ifr);
	while (totlen > 0) {
		/* The first mbuf of the chain carries the packet header. */
		if (top == 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
		} else {
			MGET(m, M_DONTWAIT, MT_DATA);
		}
		if (m == 0) {
			m_freem(top);
			top = 0;
			goto out;
		}
		/*
		 * Trailer protocol: copy the trailer first, starting at
		 * offset off; the normal data is picked up afterwards
		 * (see the wraparound at the bottom of the loop).
		 */
		if (off) {
			len = totlen - off;
			cp = ifr->ifrw_addr + ifu->iff_hlen + off;
		} else
			len = totlen;
		if (top == 0) {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen; /* should subtract trailer */
			m->m_len = MHLEN;
		} else
			m->m_len = MLEN;
		if (len >= MINCLSIZE) {
			struct pte *cpte, *ppte;
			int x, *ip, i;

			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0)
				goto nopage;
			len = MIN(len, MCLBYTES);
			m->m_len = len;
			/* Remapping only works on cluster-aligned data. */
			if (!claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to UNIBUS with new page pp,
			 * as quick form of copy.  Remap UNIBUS and invalidate.
			 */
			pp = mtod(m, char *);
			cpte = kvtopte(cp);
			ppte = kvtopte(pp);
			x = btop(cp - ifr->ifrw_addr);
			ip = (int *)&ifr->ifrw_mr[x];
			for (i = 0; i < MCLBYTES/NBPG; i++) {
				struct pte t;
				/* Exchange the kernel PTEs, then point the
				 * UNIBUS map at the buffer's new page frame. */
				t = *ppte; *ppte++ = *cpte; *cpte = t;
				*ip++ = cpte++->pg_pfnum|ifr->ifrw_proto;
			/*	mtpr(TBIS, cp); */
				cp += NBPG;
				mtpr(TBIS, (caddr_t)pp);
				pp += NBPG;
			}
			goto nocopy;
		}
nopage:
		if (len < m->m_len) {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (top == 0 && len + max_linkhdr <= m->m_len)
				m->m_data += max_linkhdr;
			m->m_len = len;
		} else
			len = m->m_len;
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			/* sort of an ALGOL-W style for statement... */
			off += len;
			if (off == totlen) {
				/* Trailer consumed: wrap to the packet data
				 * and copy up to where the trailer began. */
				cp = ifr->ifrw_addr + ifu->iff_hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= len;
	}
out:
	/* If this was a transmit buffer, re-record its current mapping. */
	if (ifr->ifrw_flags & IFRW_W)
		restor_xmtbuf((struct ifxmt *)ifr);
	return (top);
}
264 
/*
 * Change the mapping on a transmit buffer so that if_ubaget may
 * receive from that buffer.  Copy data from any pages mapped to Unibus
 * into the pages mapped to normal kernel virtual memory, so that
 * they can be accessed and swapped as usual.  We take advantage
 * of the fact that clusters are placed on the xtofree list
 * in inverse order, finding the last one.
 */
static
rcv_xmtbuf(ifw)
	register struct ifxmt *ifw;
{
	register struct mbuf *m;
	struct mbuf **mprev;
	register i;
	char *cp;

	/*
	 * For each cluster still mapped out (bit set in ifw_xswapd), copy
	 * that cluster's data from its held mbuf back into the buffer.
	 * NOTE(review): cp is computed from the 1-based ffs() result before
	 * i is decremented; this lands on the correct cluster when the
	 * buffer begins with a header cluster (iff_hlen != 0) -- confirm
	 * the arithmetic for header-less interfaces.
	 */
	while (i = ffs((long)ifw->ifw_xswapd)) {
		cp = ifw->ifw_base + i * MCLBYTES;
		i--;
		ifw->ifw_xswapd &= ~(1<<i);
		/* The xtofree list is in inverse order; walk to the last mbuf. */
		mprev = &ifw->ifw_xtofree;
		for (m = ifw->ifw_xtofree; m && m->m_next; m = m->m_next)
			mprev = &m->m_next;
		if (m == NULL)
			break;
		bcopy(mtod(m, caddr_t), cp, MCLBYTES);
		(void) m_free(m);
		*mprev = NULL;
	}
	/* Restore the buffer's normal UNIBUS mapping throughout. */
	ifw->ifw_xswapd = 0;
	for (i = 0; i < ifw->ifw_nmr; i++)
		ifw->ifw_mr[i] = ifw->ifw_wmap[i];
}
299 
300 /*
301  * Put a transmit buffer back together after doing an if_ubaget on it,
302  * which may have swapped pages.
303  */
304 static
305 restor_xmtbuf(ifw)
306 	register struct ifxmt *ifw;
307 {
308 	register i;
309 
310 	for (i = 0; i < ifw->ifw_nmr; i++)
311 		ifw->ifw_wmap[i] = ifw->ifw_mr[i];
312 }
313 
/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 *
 * Cluster mbufs that are suitably aligned are "mapped out": the UNIBUS
 * map registers are pointed at the mbuf's own pages instead of copying,
 * and the mbuf is held on ifw_xtofree until the i/o completes.
 * Returns the total byte count mapped (the transfer length).
 */
if_ubaput(ifu, ifw, m)
	struct ifubinfo *ifu;
	register struct ifxmt *ifw;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp, dp;
	register int i;
	int xswapd = 0;
	int x, cc, t;

	cp = ifw->ifw_addr;
	while (m) {
		dp = mtod(m, char *);
		/* A cluster can be mapped in place only if both the buffer
		 * position and the mbuf data are cluster-aligned, and the
		 * mbuf is either full or the last of the chain. */
		if (claligned(cp) && claligned(dp) &&
		    (m->m_len == MCLBYTES || m->m_next == (struct mbuf *)0)) {
			struct pte *pte;
			int *ip;

			/* Point the UNIBUS map at the mbuf's page frames. */
			pte = kvtopte(dp);
			x = btop(cp - ifw->ifw_addr);
			ip = (int *)&ifw->ifw_mr[x];
			for (i = 0; i < MCLBYTES/NBPG; i++)
				*ip++ = ifw->ifw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(MCLSHIFT-PGSHIFT));
			/* Hold the mbuf until the transmit completes. */
			mp = m->m_next;
			m->m_next = ifw->ifw_xtofree;
			ifw->ifw_xtofree = m;
			cp += m->m_len;
		} else {
			/* Unaligned or partial: copy into the mapped buffer. */
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
			MFREE(m, mp);
		}
		m = mp;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifw->ifw_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
	cc = cp - ifw->ifw_addr;
	x = ((cc - ifu->iff_hlen) + MCLBYTES - 1) >> MCLSHIFT;
	ifw->ifw_xswapd &= ~xswapd;
	while (i = ffs((long)ifw->ifw_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifw->ifw_xswapd &= ~(1<<i);
		/* Restore the normal mapping for this cluster's pages. */
		i *= MCLBYTES/NBPG;
		for (t = 0; t < MCLBYTES/NBPG; t++) {
			ifw->ifw_mr[i] = ifw->ifw_wmap[i];
			i++;
		}
	}
	ifw->ifw_xswapd |= xswapd;
	return (cc);
}
382