xref: /original-bsd/sys/tahoe/tahoe/vm_machdep.c (revision bff07831)
1e8734b75Skarels /*
2e8734b75Skarels  * Copyright (c) 1988 Regents of the University of California.
3c3d90075Skarels  * All rights reserved.
4e8734b75Skarels  *
5c3d90075Skarels  * This code is derived from software contributed to Berkeley by
6c3d90075Skarels  * Computer Consoles Inc.
7c3d90075Skarels  *
8e469b5feSbostic  * %sccs.include.redist.c%
9c3d90075Skarels  *
10*bff07831Sbostic  *	@(#)vm_machdep.c	7.10 (Berkeley) 09/23/93
11e8734b75Skarels  */
12b63890e8Ssam 
130dce6c9dSbostic #include "sys/param.h"
140dce6c9dSbostic #include "sys/systm.h"
150dce6c9dSbostic #include "sys/user.h"
160dce6c9dSbostic #include "sys/proc.h"
170dce6c9dSbostic #include "sys/cmap.h"
180dce6c9dSbostic #include "sys/vm.h"
190dce6c9dSbostic #include "sys/text.h"
200dce6c9dSbostic #include "sys/kernel.h"
21b63890e8Ssam 
220dce6c9dSbostic #include "../include/pte.h"
230dce6c9dSbostic #include "../include/cpu.h"
240dce6c9dSbostic #include "../include/mtpr.h"
25b63890e8Ssam 
26b63890e8Ssam /*
27b63890e8Ssam  * Set a red zone in the kernel stack after the u. area.
28b63890e8Ssam  */
setredzone(pte,vaddr)29b63890e8Ssam setredzone(pte, vaddr)
30b63890e8Ssam 	register struct pte *pte;
31b63890e8Ssam 	caddr_t vaddr;
32b63890e8Ssam {
33b63890e8Ssam 
34b63890e8Ssam 	pte += (sizeof (struct user) + NBPG - 1) / NBPG;
35b63890e8Ssam 	*(int *)pte &= ~PG_PROT;
36b63890e8Ssam 	*(int *)pte |= PG_URKR;
37b63890e8Ssam 	if (vaddr)
380b8444b9Ssam 		mtpr(TBIS, vaddr + sizeof (struct user) + NBPG - 1);
39b63890e8Ssam }
40b63890e8Ssam 
41b63890e8Ssam /*
42b63890e8Ssam  * Check for valid program size
43477ef2c3Ssam  * NB - Check data and data growth separately as they may overflow
44477ef2c3Ssam  * when summed together.
45b63890e8Ssam  */
chksize(ts,ids,uds,ss)460b8444b9Ssam chksize(ts, ids, uds, ss)
470b8444b9Ssam 	register unsigned ts, ids, uds, ss;
48b63890e8Ssam {
49dd30bb3aSsam 	extern unsigned maxtsize;
50b63890e8Ssam 
51477ef2c3Ssam 	if (ctob(ts) > maxtsize ||
520b8444b9Ssam 	    ctob(ids) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
530b8444b9Ssam 	    ctob(uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
540b8444b9Ssam 	    ctob(ids + uds) > u.u_rlimit[RLIMIT_DATA].rlim_cur ||
550b8444b9Ssam 	    ctob(ss) > u.u_rlimit[RLIMIT_STACK].rlim_cur) {
56fc03226bSmckusick 		return (ENOMEM);
57b63890e8Ssam 	}
58b63890e8Ssam 	return (0);
59b63890e8Ssam }
60b63890e8Ssam 
61b63890e8Ssam /*ARGSUSED*/
newptes(pte,v,size)62b63890e8Ssam newptes(pte, v, size)
63b63890e8Ssam 	register struct pte *pte;
64b63890e8Ssam 	u_int v;
65b63890e8Ssam 	register int size;
66b63890e8Ssam {
67b63890e8Ssam 	register caddr_t a = ptob(v);
68b63890e8Ssam 
69b63890e8Ssam #ifdef lint
70b63890e8Ssam 	pte = pte;
71b63890e8Ssam #endif
72b63890e8Ssam 	if (size >= 8) {
730b8444b9Ssam 		mtpr(TBIA, 0);
74b63890e8Ssam 		return;
75b63890e8Ssam 	}
76b63890e8Ssam 	while (size > 0) {
770b8444b9Ssam 		mtpr(TBIS, a);
78b63890e8Ssam 		a += NBPG;
79b63890e8Ssam 		size--;
80b63890e8Ssam 	}
81b63890e8Ssam }
82b63890e8Ssam 
/*
 * Change protection codes of text segment.
 * Have to flush translation buffer since this
 * affects virtual memory mapping of current process.
 * Returns 0 on success, EFAULT if addr is not in the text segment.
 */
chgprot(addr, tprot)
	caddr_t addr;
	long tprot;
{
	unsigned v;
	int tp;
	register struct pte *pte;
	register struct cmap *c;

	v = clbase(btop(addr));
	/* The page must lie within the process' text segment. */
	if (!isatsv(u.u_procp, v))
		return (EFAULT);
	tp = vtotp(u.u_procp, v);
	pte = tptopte(u.u_procp, tp);
	/*
	 * If the page is resident (not fill-on-demand) and has a
	 * backing block, unhash it from the block cache.
	 * NOTE(review): presumably so the page cannot be reclaimed
	 * and shared under its old protection -- confirm.
	 */
	if (pte->pg_fod == 0 && pte->pg_pfnum) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_blkno)
			munhash(c->c_vp, (daddr_t)(u_long)c->c_blkno);
	}
	/* Install the new protection, then propagate it and flush. */
	*(int *)pte &= ~PG_PROT;
	*(int *)pte |= tprot;
	distcl(pte);
	tbiscl(v);
	return (0);
}
113b63890e8Ssam 
settprot(tprot)114b63890e8Ssam settprot(tprot)
115b63890e8Ssam 	long tprot;
116b63890e8Ssam {
117b63890e8Ssam 	register int *ptaddr, i;
118b63890e8Ssam 
119b63890e8Ssam 	ptaddr = (int *)mfpr(P0BR);
120b63890e8Ssam 	for (i = 0; i < u.u_tsize; i++) {
121b63890e8Ssam 		ptaddr[i] &= ~PG_PROT;
122b63890e8Ssam 		ptaddr[i] |= tprot;
123b63890e8Ssam 	}
1240b8444b9Ssam 	mtpr(TBIA, 0);
125b63890e8Ssam }
126b63890e8Ssam 
1270b8444b9Ssam #ifdef notdef
/*
 * Rest are machine-dependent
 */
/*
 * Fetch one byte of physical memory through the vmmap window.
 * Returns the byte as an unsigned value (0..0377).
 */
getmemc(addr)
	caddr_t addr;
{
	register int c;
	struct pte savemap;

	/* Borrow the mmap/vmmap window: map the page kernel read-only. */
	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KR | btop(addr);
	mtpr(TBIS, vmmap);
	/* Flush any stale cached data for the window before reading. */
	uncache(&vmmap[(int)addr & PGOFSET]);
	c = *(char *)&vmmap[(int)addr & PGOFSET];
	/* Restore the previous window mapping. */
	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
	return (c & 0377);
}
146b63890e8Ssam 
/*
 * Store one byte of physical memory through the vmmap window.
 * (val is an implicit int, K&R style.)
 */
putmemc(addr, val)
	caddr_t addr;
{
	struct pte savemap;

	/* Borrow the mmap/vmmap window: map the page kernel read-write. */
	savemap = mmap[0];
	*(int *)mmap = PG_V | PG_KW | btop(addr);
	mtpr(TBIS, vmmap);
	*(char *)&vmmap[(int)addr & PGOFSET] = val;

	/* Purge data and code caches so the store is seen everywhere. */
	mtpr(PADC, 0);
	mtpr(PACC, 0);

	/* Restore the previous window mapping. */
	mmap[0] = savemap;
	mtpr(TBIS, vmmap);
}
1630b8444b9Ssam #endif
164b63890e8Ssam 
165b63890e8Ssam /*
166b63890e8Ssam  * Move pages from one kernel virtual address to another.
167b63890e8Ssam  * Both addresses are assumed to reside in the Sysmap,
168b63890e8Ssam  * and size must be a multiple of CLSIZE.
169b63890e8Ssam  */
pagemove(from,to,size)170b63890e8Ssam pagemove(from, to, size)
171b63890e8Ssam 	register caddr_t from, to;
172b63890e8Ssam 	int size;
173b63890e8Ssam {
174b63890e8Ssam 	register struct pte *fpte, *tpte;
175b63890e8Ssam 
176b63890e8Ssam 	if (size % CLBYTES)
177b63890e8Ssam 		panic("pagemove");
178eab95c40Skarels 	fpte = kvtopte(from);
179eab95c40Skarels 	tpte = kvtopte(to);
180b63890e8Ssam 	while (size > 0) {
181b63890e8Ssam 		*tpte++ = *fpte;
182b63890e8Ssam 		*(int *)fpte++ = 0;
1830b8444b9Ssam 		mtpr(TBIS, from);
1840b8444b9Ssam 		mtpr(TBIS, to);
1850b8444b9Ssam 		mtpr(P1DC, to);		/* purge !! */
186b63890e8Ssam 		from += NBPG;
187b63890e8Ssam 		to += NBPG;
188b63890e8Ssam 		size -= NBPG;
189b63890e8Ssam 	}
190b63890e8Ssam }
191b63890e8Ssam 
/*
 * Code and data key management routines.
 *
 * The array ckey_cnt maintains the count of processes currently
 * sharing each code key.  The array ckey_cache maintains a record
 * of all code keys used since the last flush of the code cache.
 * Such keys may not be reused, even if unreferenced, until
 * the cache is flushed.  The data cache key handling is analogous.
 * The arrays ckey_cnt and ckey_cache are always kept in such a way
 * that the following invariant holds:
 *	ckey_cnt > 0	implies	ckey_cache == 1
 * meaning that as long as a code key is used by at least one process,
 * it is marked as being 'in the cache'.  Of course, the contrapositive
 * also holds:
 *	ckey_cache == 0	implies	ckey_cnt == 0
 * which is just the reciprocal of the first invariant.
 * Equivalent invariants hold for the data key arrays.
 */
/*
 * Key-allocation statistics.  The initializer sets the first member
 * to the number of usable keys (key 0 is reserved to mean "no key";
 * allocation scans run 1..MAXCKEY / 1..MAXDKEY below).
 * NOTE(review): presumably the first member is ks_avail -- verify
 * against the struct keystats declaration.
 */
struct	keystats ckeystats = { NCKEY - 1 };
struct	keystats dkeystats = { NDKEY - 1 };
2120b8444b9Ssam 
2130b8444b9Ssam /*
2140b8444b9Ssam  * Release a code key.
2150b8444b9Ssam  */
ckeyrelease(key)2160b8444b9Ssam ckeyrelease(key)
2170b8444b9Ssam 	int key;
2180b8444b9Ssam {
2190b8444b9Ssam 	register int s;
2200b8444b9Ssam 
2210b8444b9Ssam 	s = spl8();
2220b8444b9Ssam 	if (--ckey_cnt[key] < 0) {
2230b8444b9Ssam 		printf("ckeyrelease: key = %d\n", key);
2240b8444b9Ssam 		ckey_cnt[key] = 0;
2250b8444b9Ssam 	}
226ca0eba02Skarels 	if (ckey_cnt[key] == 0)
227ca0eba02Skarels 		ckeystats.ks_dirty++;
2280b8444b9Ssam 	splx(s);
2290b8444b9Ssam }
2300b8444b9Ssam 
/*
 * Release a data key.
 */
dkeyrelease(key)
	int key;
{
	register int s;

	s = spl8();
	/*
	 * Unlike code keys, a data key apparently has a single owner
	 * (cf. dkeyinval/getdatakey), so the count should go from
	 * exactly 1 to 0 here; anything else is an accounting error,
	 * which is reported and repaired.
	 */
	if (--dkey_cnt[key] != 0) {
		printf("dkeyrelease: key = %d\n", key);
		dkey_cnt[key] = 0;
	}
	splx(s);
	/* The key stays unusable until the data cache is purged. */
	dkeystats.ks_dirty++;
}
2470b8444b9Ssam 
248ca0eba02Skarels /*
249ca0eba02Skarels  * Invalidate the data cache for a process
250ca0eba02Skarels  * by exchanging cache keys.
251ca0eba02Skarels  */
dkeyinval(p)252ca0eba02Skarels dkeyinval(p)
253ca0eba02Skarels 	register struct proc *p;
254ca0eba02Skarels {
255ca0eba02Skarels 	int s;
256ca0eba02Skarels 
257ca0eba02Skarels 	dkeystats.ks_inval++;
258ca0eba02Skarels 	s = spl8();
259ca0eba02Skarels 	if (--dkey_cnt[p->p_dkey] != 0)
260ca0eba02Skarels 		dkey_cnt[p->p_dkey] = 0;
261b30906bbSkarels 	if (p == u.u_procp && !noproc) {
262ca0eba02Skarels 		p->p_dkey = getdatakey();
263ca0eba02Skarels 		mtpr(DCK, p->p_dkey);
264ca0eba02Skarels 	} else
265ca0eba02Skarels 		p->p_dkey = 0;
266ca0eba02Skarels 	splx(s);
267ca0eba02Skarels }
268ca0eba02Skarels 
/*
 * Get a code key.
 * Strategy: try each of the following in turn
 * until a key is allocated.
 *
 * 1) Find an unreferenced key not yet in the cache.
 *    If this fails, a code cache purge will be necessary.
 * 2) Find an unreferenced key.  Mark all unreferenced keys
 *    as available and purge the cache.
 * 3) Free the keys from all processes not sharing keys.
 * 4) Free the keys from all processes.
 */
getcodekey()
{
	register int i, s, freekey;
	register struct proc *p;
	int desparate = 0;		/* set on second steal pass */
	/* Rotor: resume the scan just past the last key handed out. */
	static int lastkey = MAXCKEY;

	ckeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	/*
	 * Round-robin scan of keys 1..MAXCKEY, wrapping around and
	 * stopping after one full circuit (i == lastkey).
	 */
	for (i = lastkey + 1; ; i++) {
		if (i > MAXCKEY)
			i = 1;
		if ((int)ckey_cache[i] == 0) {	/* free key, take it */
			ckey_cache[i] = 1, ckey_cnt[i] = 1;
			splx(s);
			ckeystats.ks_allocfree++;
			ckeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (ckey_cnt[i] == 0)		/* save for potential use */
			freekey = i;
		if (i == lastkey)
			break;
	}
	/*
	 * All code keys were marked as being in cache.
	 * If a key was in the cache, but not in use, grab it.
	 */
	if (freekey != 0) {
purge:
		/*
		 * If we've run out of free keys,
		 * try and free up some other keys to avoid
		 * future cache purges.
		 */
		ckey_cnt[freekey] = 1, ckey_cache[freekey] = 1;
		for (i = 1; i <= MAXCKEY; i++)
			if (ckey_cnt[i] == 0) {
				ckey_cache[i] = 0;
				ckeystats.ks_avail++;
			}
		mtpr(PACC, 0);		/* purge the code cache */
		splx(s);
		ckeystats.ks_dirty = 0;
		ckeystats.ks_norefs++;
		return (freekey);
	}

	/*
	 * All keys are marked as in the cache and in use.
	 * Release all unshared keys, or, on second pass,
	 * release all keys.
	 */
steal:
	for (p = allproc; p; p = p->p_next)
		if (p->p_ckey != 0 && (p->p_flag & P_SYSTEM) == 0) {
			i = p->p_ckey;
			if (ckey_cnt[i] == 1 || desparate) {
				p->p_ckey = 0;
				if (--ckey_cnt[i] == 0) {
					freekey = i;
					/* Text segment loses its key too. */
					if (p->p_textp)
						p->p_textp->x_ckey = 0;
				}
			}
		}

	if (freekey) {
		ckeystats.ks_taken++;
		goto purge;
	} else {
		desparate++;	/* next pass steals even shared keys */
		goto steal;
	}
}
3580b8444b9Ssam 
/*
 * Get a data key.
 *
 * General strategy:
 * 1) Try to find a data key that isn't in the cache. Allocate it.
 * 2) If all data keys are in the cache, find one which isn't
 *    allocated.  Mark all unallocated keys as not in cache,
 *    purge the cache, and allocate this one.
 * 3) If all of them are allocated, free all process' keys
 *    and let them reclaim then as they run.
 */
getdatakey()
{
	register int i, freekey;
	register struct proc *p;
	int s;
	/* Rotor: resume the scan just past the last key handed out. */
	static int lastkey = MAXDKEY;

	dkeystats.ks_allocs++;
	s = spl8();
	freekey = 0;
	/*
	 * Round-robin scan of keys 1..MAXDKEY, wrapping around and
	 * stopping after one full circuit (i == lastkey).
	 */
	for (i = lastkey + 1; ; i++) {
		if (i > MAXDKEY)
			i = 1;
		if ((int)dkey_cache[i] == 0) {	/* free key, take it */
			dkey_cache[i] = 1, dkey_cnt[i] = 1;
			splx(s);
			dkeystats.ks_allocfree++;
			dkeystats.ks_avail--;
			lastkey = i;
			return (i);
		}
		if (dkey_cnt[i] == 0)		/* save for potential use */
			freekey = i;
		if (i == lastkey)
			break;
	}
purge:
	if (freekey) {
		/*
		 * Try and free up some more keys to avoid
		 * future allocations causing a cache purge.
		 */
		dkey_cnt[freekey] = 1, dkey_cache[freekey] = 1;
		for (i = 1; i <= MAXDKEY; i++)
			if (dkey_cnt[i] == 0) {
				dkey_cache[i] = 0;
				dkeystats.ks_avail++;
			}
		mtpr(PADC, 0);		/* purge the data cache */
		splx(s);
		dkeystats.ks_norefs++;
		dkeystats.ks_dirty = 0;
		return (freekey);
	}

	/*
	 * Now, we have to take a key from someone.
	 * May as well take them all, so we get them
	 * from all of the idle procs.
	 */
	for (p = allproc; p; p = p->p_next)
		if (p->p_dkey != 0 && (p->p_flag & P_SYSTEM) == 0) {
			freekey = p->p_dkey;
			dkey_cnt[freekey] = 0;
			p->p_dkey = 0;
		}
	dkeystats.ks_taken++;
	goto purge;
}
4290b8444b9Ssam 
4300b8444b9Ssam /*VARGARGS1*/
vtoph(p,v)431b63890e8Ssam vtoph(p, v)
432b63890e8Ssam 	register struct proc *p;
433ca0eba02Skarels 	unsigned v;
434b63890e8Ssam {
4350b8444b9Ssam 	register struct pte *pte;
436ca0eba02Skarels 	register unsigned pg;
437b63890e8Ssam 
438ca0eba02Skarels 	pg = btop(v);
439ca0eba02Skarels 	if (pg >= BTOPKERNBASE)
440ca0eba02Skarels 		pte = &Sysmap[pg - BTOPKERNBASE];
441ca0eba02Skarels 	else
442ca0eba02Skarels 		pte = vtopte(p, pg);
4430b8444b9Ssam 	return ((pte->pg_pfnum << PGSHIFT) + (v & PGOFSET));
444b63890e8Ssam }
445