xref: /original-bsd/sys/vax/if/if_uba.c (revision b7bd495c)
/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)if_uba.c	7.7 (Berkeley) 10/22/87
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "mbuf.h"
#include "map.h"
#include "buf.h"
#include "cmap.h"
#include "vmmac.h"
#include "socket.h"
#include "syslog.h"
#include "malloc.h"

#include "../net/if.h"

#include "../vax/mtpr.h"
#include "if_uba.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"

/*
 * Routines supporting UNIBUS network interfaces.
 *
 * TODO:
 *	Support interfaces using only one BDP statically.
 */
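/*
 * Rough usage sketch (not part of this file; the sc/ui names and the
 * sizes below are illustrative only).  A UNIBUS network driver typically
 * allocates one receive and one transmit buffer at attach or init time,
 * pulls received packets into mbufs from its receive interrupt, and maps
 * an outgoing mbuf chain just before starting DMA:
 *
 *	if (if_ubaminit(&sc->sc_ifuba.ifu_info, ui->ui_ubanum,
 *	    sizeof (struct ether_header), (int)btoc(ETHERMTU),
 *	    &sc->sc_ifuba.ifu_r, 1, &sc->sc_ifuba.ifu_xmt, 1) == 0)
 *		goto fail;
 *	...
 *	m = if_ubaget(&sc->sc_ifuba.ifu_info, &sc->sc_ifuba.ifu_r,
 *	    len, off, &sc->sc_if);
 *	...
 *	cc = if_ubaput(&sc->sc_ifuba.ifu_info, &sc->sc_ifuba.ifu_xmt, m);
 *
 * Single-buffer drivers usually go through the if_ubainit, if_rubaget
 * and if_wubaput convenience macros in if_uba.h rather than calling
 * these routines directly.
 */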

/*
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this once for each read and once for each write buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 */
if_ubaminit(ifu, uban, hlen, nmr, ifr, nr, ifw, nw)
	register struct ifubinfo *ifu;
	int uban, hlen, nmr, nr, nw;
	register struct ifrw *ifr;
	register struct ifxmt *ifw;
{
	register caddr_t p;
	caddr_t cp;
	int i, nclbytes, off;

	if (hlen)
		off = CLBYTES - hlen;
	else
		off = 0;
	nclbytes = CLBYTES * (clrnd(nmr) / CLSIZE);
	if (hlen)
		nclbytes += CLBYTES;
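	/*
	 * Layout note (illustrative numbers; VAX values are NBPG = 512,
	 * CLSIZE = 2, CLBYTES = 1024): with hlen = 16, off = 1008, so each
	 * buffer is one cluster holding the header in its last hlen bytes,
	 * followed by clrnd(nmr)/CLSIZE clusters of data.  ifrw_addr points
	 * at the header, which ends exactly at the page boundary where the
	 * data and its nmr map registers begin.
	 */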
	if (ifr[0].ifrw_addr)
		cp = ifr[0].ifrw_addr - off;
	else {
		cp = (caddr_t)malloc((nr + nw) * nclbytes, M_DEVBUF, M_NOWAIT);
		if (cp == 0)
			return (0);
		p = cp;
		for (i = 0; i < nr; i++) {
			ifr[i].ifrw_addr = p + off;
			p += nclbytes;
		}
		for (i = 0; i < nw; i++) {
			ifw[i].ifw_base = p;
			ifw[i].ifw_addr = p + off;
			p += nclbytes;
		}
		ifu->iff_hlen = hlen;
		ifu->iff_uban = uban;
		ifu->iff_uba = uba_hd[uban].uh_uba;
		ifu->iff_ubamr = uba_hd[uban].uh_mr;
	}
	for (i = 0; i < nr; i++)
		if (if_ubaalloc(ifu, &ifr[i], nmr) == 0) {
			nr = i;
			nw = 0;
			goto bad;
		}
	for (i = 0; i < nw; i++)
		if (if_ubaalloc(ifu, &ifw[i].ifrw, nmr) == 0) {
			nw = i;
			goto bad;
		}
	while (--nw >= 0) {
		for (i = 0; i < nmr; i++)
			ifw[nw].ifw_wmap[i] = ifw[nw].ifw_mr[i];
		ifw[nw].ifw_xswapd = 0;
		ifw[nw].ifw_flags = IFRW_W;
		ifw[nw].ifw_nmr = nmr;
	}
	return (1);
bad:
	while (--nw >= 0)
		ubarelse(ifu->iff_uban, &ifw[nw].ifw_info);
	while (--nr >= 0)
		ubarelse(ifu->iff_uban, &ifr[nr].ifrw_info);
	free(cp, M_DEVBUF);
	ifr[0].ifrw_addr = 0;
	return (0);
}

/*
 * Set up an ifrw structure by allocating UNIBUS map registers,
 * possibly a buffered data path, and initializing the fields of
 * the ifrw structure to minimize run-time overhead.
 */
static
if_ubaalloc(ifu, ifrw, nmr)
	struct ifubinfo *ifu;
	register struct ifrw *ifrw;
	int nmr;
{
	register int info;

	info =
	    uballoc(ifu->iff_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->iff_hlen,
	        ifu->iff_flags);
	if (info == 0)
		return (0);
	ifrw->ifrw_info = info;
	ifrw->ifrw_bdp = UBAI_BDP(info);
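	/*
	 * ifrw_proto is a map-register prototype: the valid bit plus the
	 * buffered data path number.  if_ubaget and if_ubaput OR a page
	 * frame number into it whenever they reload a map register.
	 */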
	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
	ifrw->ifrw_mr = &ifu->iff_ubamr[UBAI_MR(info) + (ifu->iff_hlen? 1 : 0)];
	return (1);
}

/*
 * Pull read data off an interface.
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster-sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 * Prepend a pointer to the interface structure,
 * so that protocols can determine where incoming packets arrived.
 * Note: we may be called to receive from a transmit buffer by some
 * devices.  In that case, we must force normal mapping of the buffer,
 * so that the correct data will appear (only UNIBUS maps are
 * changed when remapping the transmit buffers).
 */
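/*
 * Illustrative trailer example (numbers are hypothetical): for a packet
 * with 1024 data bytes followed by a 16-byte trailer header region, the
 * driver passes totlen = 1040 and off0 = 1024.  The loop below first
 * gathers bytes 1024..1039 of the data region (the protocol headers
 * stored at the end of the packet), then wraps to the front and gathers
 * bytes 0..1023, so the completed mbuf chain ends up in normal
 * header-then-data order.
 */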
struct mbuf *
if_ubaget(ifu, ifr, totlen, off0, ifp)
	struct ifubinfo *ifu;
	register struct ifrw *ifr;
	int totlen, off0;
	struct ifnet *ifp;
{
	struct mbuf *top, **mp;
	register struct mbuf *m;
	int off = off0, len;
	register caddr_t cp = ifr->ifrw_addr + ifu->iff_hlen, pp;

	top = 0;
	mp = &top;
	if (ifr->ifrw_flags & IFRW_W)
		rcv_xmtbuf((struct ifxmt *)ifr);
	while (totlen > 0) {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (m == 0) {
			m_freem(top);
			top = 0;
			goto out;
		}
		if (off) {
			len = totlen - off;
			cp = ifr->ifrw_addr + ifu->iff_hlen + off;
		} else
			len = totlen;
		if (len >= CLBYTES/2) {
			struct pte *cpte, *ppte;
			int x, *ip, i;

			/*
			 * If doing the first mbuf and
			 * the interface pointer hasn't been put in,
			 * put it in a separate mbuf to preserve alignment.
			 */
			if (ifp) {
				len = 0;
				goto nopage;
			}
			MCLGET(m);
			if (m->m_len != CLBYTES)
				goto nopage;
			m->m_len = MIN(len, CLBYTES);
			if (!claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to UNIBUS with new page pp,
			 * as a quick form of copy.  Remap UNIBUS and invalidate.
			 */
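			/*
			 * For each of the CLSIZE pages in the cluster:
			 * exchange the Mbmap PTEs of the old buffer page
			 * (cp) and the fresh mbuf cluster page (pp), point
			 * the UNIBUS map register at the page now behind
			 * cp, and flush both translations with TBIS.
			 */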
			pp = mtod(m, char *);
			cpte = &Mbmap[mtocl(cp)*CLSIZE];
			ppte = &Mbmap[mtocl(pp)*CLSIZE];
			x = btop(cp - ifr->ifrw_addr);
			ip = (int *)&ifr->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++) {
				struct pte t;
				t = *ppte; *ppte++ = *cpte; *cpte = t;
				*ip++ =
				    cpte++->pg_pfnum|ifr->ifrw_proto;
				mtpr(TBIS, cp);
				cp += NBPG;
				mtpr(TBIS, (caddr_t)pp);
				pp += NBPG;
			}
			goto nocopy;
		}
nopage:
		m->m_off = MMINOFF;
		if (ifp) {
			/*
			 * Leave room for ifp.
			 */
			m->m_len = MIN(MLEN - sizeof(ifp), len);
			m->m_off += sizeof(ifp);
		} else
			m->m_len = MIN(MLEN, len);
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
		cp += m->m_len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			/* sort of an ALGOL-W style for statement... */
			off += m->m_len;
			if (off == totlen) {
				cp = ifr->ifrw_addr + ifu->iff_hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= m->m_len;
		if (ifp) {
			/*
			 * Prepend interface pointer to first mbuf.
			 */
			m->m_len += sizeof(ifp);
			m->m_off -= sizeof(ifp);
			*(mtod(m, struct ifnet **)) = ifp;
			ifp = (struct ifnet *)0;
		}
	}
out:
	if (ifr->ifrw_flags & IFRW_W)
		restor_xmtbuf((struct ifxmt *)ifr);
	return (top);
}

/*
 * Change the mapping on a transmit buffer so that if_ubaget may
 * receive from that buffer.  Copy data from any pages mapped to UNIBUS
 * into the pages mapped to normal kernel virtual memory, so that
 * they can be accessed and swapped as usual.  We take advantage
 * of the fact that clusters are placed on the xtofree list
 * in inverse order, finding the last one.
 */
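/*
 * Each bit of ifw_xswapd stands for one cluster of the transmit buffer
 * whose UNIBUS map registers currently point at an mbuf cluster (see
 * if_ubaput below).  Only the UNIBUS mapping was changed, so the data
 * is copied from that mbuf into the buffer's own pages and the saved
 * map registers (ifw_wmap) are reinstalled.
 */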
static
rcv_xmtbuf(ifw)
	register struct ifxmt *ifw;
{
	register struct mbuf *m;
	struct mbuf **mprev;
	register i;
	char *cp;

	while (i = ffs((long)ifw->ifw_xswapd)) {
		cp = ifw->ifw_base + i * CLBYTES;
		i--;
		ifw->ifw_xswapd &= ~(1<<i);
		mprev = &ifw->ifw_xtofree;
		for (m = ifw->ifw_xtofree; m && m->m_next; m = m->m_next)
			mprev = &m->m_next;
		if (m == NULL)
			break;
		bcopy(mtod(m, caddr_t), cp, CLBYTES);
		(void) m_free(m);
		*mprev = NULL;
	}
	ifw->ifw_xswapd = 0;
	for (i = 0; i < ifw->ifw_nmr; i++)
		ifw->ifw_mr[i] = ifw->ifw_wmap[i];
}

/*
 * Put a transmit buffer back together after doing an if_ubaget on it,
 * which may have swapped pages.
 */
static
restor_xmtbuf(ifw)
	register struct ifxmt *ifw;
{
	register i;

	for (i = 0; i < ifw->ifw_nmr; i++)
		ifw->ifw_wmap[i] = ifw->ifw_mr[i];
}

/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 */
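/*
 * Rough transmit-side sketch (the sc/addr device names are hypothetical;
 * see the UNIBUS Ethernet drivers for real examples).  if_ubaput returns
 * the byte count to hand to the device; the low 18 bits of ifw_info give
 * the UNIBUS address of the mapped buffer.  Cluster mbufs that were
 * mapped in place are held on ifw_xtofree and should be freed when the
 * transmit completes:
 *
 *	cc = if_ubaput(&sc->sc_ifuba.ifu_info, &sc->sc_ifuba.ifu_xmt, m);
 *	addr->dev_txcount = cc;
 *	addr->dev_txaddr = sc->sc_ifuba.ifu_xmt.ifw_info & 0x3ffff;
 *	addr->dev_csr = DEV_GO;
 *
 * and, from the transmit-done interrupt:
 *
 *	if (sc->sc_ifuba.ifu_xmt.ifw_xtofree) {
 *		m_freem(sc->sc_ifuba.ifu_xmt.ifw_xtofree);
 *		sc->sc_ifuba.ifu_xmt.ifw_xtofree = 0;
 *	}
 */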
if_ubaput(ifu, ifw, m)
	struct ifubinfo *ifu;
	register struct ifxmt *ifw;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp, dp;
	register int i;
	int xswapd = 0;
	int x, cc, t;

	cp = ifw->ifw_addr;
	while (m) {
		dp = mtod(m, char *);
		if (claligned(cp) && claligned(dp) &&
		    (m->m_len == CLBYTES || m->m_next == (struct mbuf *)0)) {
			struct pte *pte; int *ip;
			pte = &Mbmap[mtocl(dp)*CLSIZE];
			x = btop(cp - ifw->ifw_addr);
			ip = (int *)&ifw->ifw_mr[x];
			for (i = 0; i < CLSIZE; i++)
				*ip++ =
				    ifw->ifw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT));
			mp = m->m_next;
			m->m_next = ifw->ifw_xtofree;
			ifw->ifw_xtofree = m;
			cp += m->m_len;
		} else {
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
			MFREE(m, mp);
		}
		m = mp;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifw->ifw_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
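	/*
	 * For example, if the previous packet mapped out clusters 0 and 1,
	 * while this one covers only cluster 0 (x == 1) and was bcopy'd
	 * rather than remapped (xswapd == 0), then cluster 0's map
	 * registers are restored from ifw_wmap below and bit 1 stays set
	 * for a later transmission.
	 */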
	cc = cp - ifw->ifw_addr;
	x = ((cc - ifu->iff_hlen) + CLBYTES - 1) >> CLSHIFT;
	ifw->ifw_xswapd &= ~xswapd;
	while (i = ffs((long)ifw->ifw_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifw->ifw_xswapd &= ~(1<<i);
		i *= CLSIZE;
		for (t = 0; t < CLSIZE; t++) {
			ifw->ifw_mr[i] = ifw->ifw_wmap[i];
			i++;
		}
	}
	ifw->ifw_xswapd |= xswapd;
	return (cc);
}