xref: /netbsd/sys/arch/vax/vax/pmap.c (revision 8cd451ff)
1 /*	$NetBSD: pmap.c,v 1.196 2023/03/26 12:21:09 ragge Exp $	   */
2 /*
3  * Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Luleå, Sweden.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.196 2023/03/26 12:21:09 ragge Exp $");
29 
30 #include "opt_ddb.h"
31 #include "opt_cputype.h"
32 #include "opt_modular.h"
33 #include "opt_multiprocessor.h"
34 #include "opt_lockdebug.h"
35 #include "opt_pipe.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/buf.h>
40 #include <sys/cpu.h>
41 #include <sys/device.h>
42 #include <sys/extent.h>
43 #include <sys/proc.h>
44 #include <sys/atomic.h>
45 #include <sys/kmem.h>
46 #include <sys/mutex.h>
47 
48 #include <uvm/uvm.h>
49 #include <uvm/uvm_physseg.h>
50 
51 #ifdef PMAPDEBUG
52 #include <dev/cons.h>
53 #endif
54 
55 #include <machine/macros.h>
56 #include <machine/sid.h>
57 #include <machine/scb.h>
58 #include <machine/rpb.h>
59 
60 /* QDSS console mapping hack */
61 #include "qd.h"
62 void	qdearly(void);
63 
64 /*
65  * This code uses bitfield operators for most page table entries.
66  */
67 #define PROTSHIFT	27
68 #define PROT_KW		(PG_KW >> PROTSHIFT)
69 #define PROT_KR		(PG_KR >> PROTSHIFT)
70 #define PROT_RW		(PG_RW >> PROTSHIFT)
71 #define PROT_RO		(PG_RO >> PROTSHIFT)
72 #define PROT_URKW	(PG_URKW >> PROTSHIFT)
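
/*
 * Illustrative note: the PG_* protection constants carry the 4-bit VAX
 * protection code in the high bits of a 32-bit PTE, while the pg_prot
 * bitfield of struct pte holds that bare 4-bit code.  Shifting right by
 * PROTSHIFT converts between the two forms, so e.g.
 *
 *	pte->pg_prot = PROT_KW;
 *
 * stores the same protection as or'ing PG_KW into the whole PTE word.
 */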
73 
74 /*
75  * Scratch pages usage:
76  * Page 1: initial frame pointer during autoconfig. Stack and pcb for
77  *	   processes during exit on boot CPU only.
78  * Page 2: cpu_info struct for any CPU.
79  * Page 3: unused
80  * Page 4: unused
81  */
82 uintptr_t scratch;
83 #define SCRATCHPAGES	4
84 
85 
86 static struct pmap kernel_pmap_store;
87 struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
88 
89 struct	pte *Sysmap;		/* System page table */
90 struct	pv_entry *pv_table;	/* array of entries, one per LOGICAL page */
91 u_int	pventries;
92 u_int	pvinuse;
93 vaddr_t iospace;
94 
95 vaddr_t ptemapstart, ptemapend;
96 struct	extent *ptemap;
97 #define PTMAPSZ EXTENT_FIXED_STORAGE_SIZE(100)
98 char	ptmapstorage[PTMAPSZ];
99 
100 extern	void *msgbufaddr;
101 
102 #define IOSPACE_P(p)	(((u_long)(p) & 0xe0000000) != 0)
103 #define NPTEPROCSPC	0x1000	/* # of virtual PTEs per process space */
104 #define NPTEPG		0x80	/* # of PTEs per page (logical or physical) */
105 #define PPTESZ		sizeof(struct pte)
106 #define NOVADDR		0xffffffff /* Illegal virtual address */
107 #define NPTEPERREG	0x200000
108 
109 #define	SEGTYPE(x)	(((unsigned int)(x)) >> 30)
110 #define	P0SEG		0
111 #define P1SEG		1
112 #define	SYSSEG		2
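
/*
 * The 32-bit VAX address space is divided into four 1 GB regions selected
 * by the top two address bits, which is exactly what SEGTYPE() extracts:
 *
 *	SEGTYPE(0x00001000) == P0SEG	(user program region)
 *	SEGTYPE(0x7ffff000) == P1SEG	(user stack region, grows downward)
 *	SEGTYPE(0x80001000) == SYSSEG	(system, S0, region)
 */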
113 
114 static inline void
115 pmap_decrement_stats(struct pmap *pm, bool wired)
116 {
117 	pm->pm_stats.resident_count--;
118 	if (wired)
119 		pm->pm_stats.wired_count--;
120 }
121 
122 /*
123  * Map in a virtual page.
124  */
125 static inline void
126 mapin8(int *ptep, long pte)
127 {
128 	ptep[0] = pte;
129 	ptep[1] = pte+1;
130 	ptep[2] = pte+2;
131 	ptep[3] = pte+3;
132 	ptep[4] = pte+4;
133 	ptep[5] = pte+5;
134 	ptep[6] = pte+6;
135 	ptep[7] = pte+7;
136 }
137 
138 /*
139  * Check if page table page is in use.
140  */
141 static inline int
142 ptpinuse(void *pte)
143 {
144 	int *pve = (int *)vax_trunc_page(pte);
145 	int i;
146 
147 	for (i = 0; i < NPTEPG; i += 8)
148 		if (pve[i] != 0)
149 			return 1;
150 	return 0;
151 }
152 
153 #ifdef PMAPDEBUG
154 #define PMDEBUG(x) if (startpmapdebug)printf x
155 #else
156 #define PMDEBUG(x)
157 #endif
158 
159 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
160 static kmutex_t pmap_lock;
161 #define PMAP_LOCK	mutex_spin_enter(&pmap_lock);
162 #define PMAP_UNLOCK	mutex_spin_exit(&pmap_lock);
163 #else
164 #define PMAP_LOCK
165 #define PMAP_UNLOCK
166 #endif
167 
168 #ifdef PMAPDEBUG
169 int	startpmapdebug = 0;
170 #endif
171 
172 paddr_t	  avail_start, avail_end;
173 vaddr_t	  virtual_avail, virtual_end; /* Available virtual memory	*/
174 
175 struct pv_entry *get_pventry(void);
176 void free_pventry(struct pv_entry *);
177 void more_pventries(void);
178 vaddr_t get_ptp(void);
179 void free_ptp(paddr_t);
180 
181 /*
182  * Calculating the size of the System Page Table is somewhat of a pain,
183  * because it must be in contiguous physical memory and all size
184  * calculations must be done before memory management is turned on.
185  * The argument is usrptsize, in PTEs.
186  */
187 static vsize_t
188 calc_kvmsize(vsize_t usrptsize)
189 {
190 	vsize_t kvmsize, bufsz;
191 
192 	/*
193 	 * Compute the number of pages kmem_arena will have.
194 	 */
195 	kmeminit_nkmempages();
196 
197 	/* All physical memory */
198 	kvmsize = avail_end;
199 	/* User Page table area. This may be large */
200 	kvmsize += (usrptsize * sizeof(struct pte));
201 	/* Kernel stacks per process */
202 	kvmsize += (USPACE * maxproc);
203 	/* kernel malloc arena */
204 	kvmsize += nkmempages * PAGE_SIZE;
205 	/* IO device register space */
206 	kvmsize += (IOSPSZ * VAX_NBPG);
207 	/* Pager allocations */
208 	kvmsize += (pager_map_size + MAXBSIZE);
209 	/* Anon pool structures */
210 	kvmsize += (physmem * sizeof(struct vm_anon));
211 	/* kernel malloc arena */
212 	kvmsize += avail_end;
213 
214 	/* Buffer space - get size of buffer cache and set an upper limit */
215 	bufsz = buf_memcalc();
216 	buf_setvalimit(bufsz);
217 	kvmsize += bufsz;
218 
219 	/* UBC submap space */
220 	kvmsize += (UBC_NWINS << UBC_WINSHIFT);
221 
222 	/* Exec arg space */
223 	kvmsize += NCARGS;
224 #if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
225 	/* Physmap */
226 	kvmsize += VM_PHYS_SIZE;
227 #endif
228 #if VAX46 || VAX49
229 	kvmsize += 0x800000; /* 8 MB framebuffer */
230 #endif
231 #ifdef MODULAR
232 	/* Modules are allocated out of kernel_map */
233 #define MAXLKMSIZ	0x100000	/* XXX */
234 	kvmsize += MAXLKMSIZ;
235 #endif
236 
237 	/* The swapper uses many anon's, set an arbitrary size */
238 #ifndef SWAPSIZE
239 #define	SWAPSIZE (200*1024*1024)	/* Assume 200MB swap */
240 #endif
241 	kvmsize += ((SWAPSIZE/PAGE_SIZE)*sizeof(struct vm_anon));
242 
243 	/* New pipes may steal some amount of memory. Calculate 10 pipes */
244 #ifndef PIPE_SOCKETPAIR
245 	kvmsize += PIPE_DIRECT_CHUNK*10;
246 #endif
247 	kvmsize = round_page(kvmsize);
248 	return kvmsize;
249 }
250 
251 /*
252  * pmap_bootstrap().
253  * Called as part of vm bootstrap, allocates internal pmap structures.
254  * Assumes that nothing is mapped, and that kernel stack is located
255  * immediately after end.
256  */
257 void
258 pmap_bootstrap(void)
259 {
260 	struct pcb * const pcb = lwp_getpcb(&lwp0);
261 	struct pmap * const pmap = pmap_kernel();
262 	struct cpu_info *ci;
263 	extern unsigned int etext;
264 	unsigned int sysptsize, i;
265 	vsize_t kvmsize, usrptsize;
266 	vaddr_t istack;
267 
268 	/* Set logical page size */
269 	uvmexp.pagesize = NBPG;
270 	uvm_md_init();
271 
272 	physmem = btoc(avail_end);
273 
274 	usrptsize = (1024*1024*1024)/VAX_NBPG;	/* 1GB total VM */
275 	if (vax_btop(usrptsize)* PPTESZ > avail_end/20)
276 		usrptsize = (avail_end/(20 * PPTESZ)) * VAX_NBPG;
277 
278 	kvmsize = calc_kvmsize(usrptsize);
279 	/*
280 	 * Ensure that not more than 1G is allocated, since that is
281 	 * max size of S0 space.
282 	 * Also note that for full S0 space the SLR should be 0x200000,
283 	 * since the comparison in the vax microcode is >= SLR.
284 	 */
285 #define	S0SPACE	(1*1024*1024*1024)
286 	if (kvmsize > S0SPACE)
287 		kvmsize = S0SPACE;
288 	sysptsize = kvmsize >> VAX_PGSHIFT;
289 	/*
290 	 * Virtual_* and avail_* are used for mapping the system page table.
291 	 * The need for kernel virtual memory also depends linearly on the
292 	 * amount of physical memory, therefore sysptsize is a variable
293 	 * here that is adjusted according to the physical
294 	 * memory size.
295 	 */
296 	virtual_avail = avail_end + KERNBASE;
297 	virtual_end = KERNBASE + sysptsize * VAX_NBPG;
298 	memset(Sysmap, 0, sysptsize * 4); /* clear SPT before using it */
299 
300 	/*
301 	 * The first part of Kernel Virtual memory is the physical
302 	 * memory mapped in. This makes some mm routines both simpler
303 	 * and faster, but takes ~0.75% more memory.
304 	 */
305 	pmap_map(KERNBASE, 0, avail_end, VM_PROT_READ|VM_PROT_WRITE);
306 	/*
307 	 * Kernel code is always readable by user mode; it must be, because
308 	 * of the emulation code that is somewhere in there.
309 	 * It doesn't hurt either, /netbsd is publicly readable anyway.
310 	 * There are also a couple of other things that must be in
311 	 * physical memory and that aren't managed by the vm system.
312 	 */
313 	for (i = 0; i < ((unsigned)&etext ^ KERNBASE) >> VAX_PGSHIFT; i++)
314 		Sysmap[i].pg_prot = PROT_URKW;
315 
316 	/* Map System Page Table and zero it,  Sysmap already set. */
317 	mtpr((unsigned)Sysmap - KERNBASE, PR_SBR);
318 
319 	/* Map Interrupt stack and set red zone */
320 	istack = (uintptr_t)Sysmap + round_page(sysptsize * 4);
321 	mtpr(istack + USPACE, PR_ISP);
322 	kvtopte(istack)->pg_v = 0;
323 
324 	/* Some scratch pages */
325 	scratch = istack + USPACE;
326 
327 	/* Physical-to-virtual translation table */
328 	pv_table = (struct pv_entry *)(scratch + SCRATCHPAGES * VAX_NBPG);
329 
330 	avail_start = (vaddr_t)pv_table + (round_page(avail_end >> PGSHIFT)) *
331 	    sizeof(struct pv_entry) - KERNBASE;
332 
333 	/* Kernel message buffer */
334 	avail_end -= MSGBUFSIZE;
335 	msgbufaddr = (void *)(avail_end + KERNBASE);
336 
337 	/* zero all mapped physical memory from Sysmap to here */
338 	memset((void *)istack, 0, (avail_start + KERNBASE) - istack);
339 
340 	/* QDSS console mapping hack */
341 #if NQD > 0
342 	qdearly();
343 #endif
344 
345 	/* User page table map. This is big. */
346 	MAPVIRT(ptemapstart, vax_btoc(usrptsize * sizeof(struct pte)));
347 	ptemapend = virtual_avail;
348 
349 	MAPVIRT(iospace, IOSPSZ); /* Device iospace mapping area */
350 
351 	/* Init SCB and set up stray vectors. */
352 	avail_start = scb_init(avail_start);
353 	*(struct rpb *)0 = *(struct rpb *)(uvm_lwp_getuarea(&lwp0) + REDZONEADDR);
354 
355 	if (dep_call->cpu_steal_pages)
356 		(*dep_call->cpu_steal_pages)();
357 
358 	avail_start = round_page(avail_start);
359 	virtual_avail = round_page(virtual_avail);
360 	virtual_end = trunc_page(virtual_end);
361 
362 
363 #if 0 /* Breaks cninit() on some machines */
364 	cninit();
365 	printf("Sysmap %p, istack %lx, scratch %lx\n",Sysmap,ci->ci_istack,scratch);
366 	printf("etext %p, kvmsize %lx\n", &etext, kvmsize);
367 	printf("SYSPTSIZE %x usrptsize %lx\n",
368 	    sysptsize, usrptsize * sizeof(struct pte));
369 	printf("pv_table %p, ptemapstart %lx ptemapend %lx\n",
370 	    pv_table, ptemapstart, ptemapend);
371 	printf("avail_start %lx, avail_end %lx\n",avail_start,avail_end);
372 	printf("virtual_avail %lx,virtual_end %lx\n",
373 	    virtual_avail, virtual_end);
374 	printf("startpmapdebug %p\n",&startpmapdebug);
375 #endif
376 
377 
378 	/* Init kernel pmap */
379 	pmap->pm_p1br = (struct pte *)KERNBASE;
380 	pmap->pm_p0br = (struct pte *)KERNBASE;
381 	pmap->pm_p1lr = NPTEPERREG;
382 	pmap->pm_p0lr = 0;
383 	pmap->pm_stats.wired_count = pmap->pm_stats.resident_count = 0;
384 	    /* btop(virtual_avail - KERNBASE); */
385 
386 	pmap->pm_count = 1;
387 
388 	/* Activate the kernel pmap. */
389 	pcb->P1BR = pmap->pm_p1br;
390 	pcb->P0BR = pmap->pm_p0br;
391 	pcb->P1LR = pmap->pm_p1lr;
392 	pcb->P0LR = pmap->pm_p0lr|AST_PCB;
393 	pcb->pcb_pm = pmap;
394 	pcb->pcb_pmnext = pmap->pm_pcbs;
395 	pmap->pm_pcbs = pcb;
396 	mtpr((uintptr_t)pcb->P1BR, PR_P1BR);
397 	mtpr((uintptr_t)pcb->P0BR, PR_P0BR);
398 	mtpr(pcb->P1LR, PR_P1LR);
399 	mtpr(pcb->P0LR, PR_P0LR);
400 
401 	/* initialize SSP to point curlwp (lwp0) */
402 	pcb->SSP = (uintptr_t)&lwp0;
403 	mtpr(pcb->SSP, PR_SSP);
404 
405 	/* cpu_info struct */
406 	ci = (struct cpu_info *) scratch;
407 	lwp0.l_cpu = ci;
408 	memset(ci, 0, sizeof(*ci));
409 	ci->ci_istack = istack;
410 #if defined(MULTIPROCESSOR)
411 	ci->ci_curlwp = &lwp0;
412 	ci->ci_flags = CI_MASTERCPU|CI_RUNNING;
413 	SIMPLEQ_FIRST(&cpus) = ci;
414 #endif
415 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
416 	mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_VM);
417 #endif
418 
419 	/*
420 	 * Now everything should be complete, start virtual memory.
421 	 */
422 	uvm_page_physload(avail_start >> PGSHIFT, avail_end >> PGSHIFT,
423 	    avail_start >> PGSHIFT, avail_end >> PGSHIFT,
424 	    VM_FREELIST_DEFAULT);
425 	mtpr(sysptsize, PR_SLR);
426 	rpb.sbr = mfpr(PR_SBR);
427 	rpb.slr = mfpr(PR_SLR);
428 	rpb.wait = 0;	/* DDB signal */
429 	mtpr(1, PR_MAPEN);
430 }
431 
432 /*
433  * Define the initial bounds of the kernel virtual address space.
434  */
435 void
436 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
437 {
438 	*vstartp = virtual_avail;
439 	*vendp = virtual_end;
440 }
441 
442 /*
443  * Let the VM system do early memory allocation from the direct-mapped
444  * physical memory instead.
445  */
446 vaddr_t
447 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
448 {
449 	vaddr_t v;
450 	int npgs;
451 	uvm_physseg_t bank;
452 
453 	PMDEBUG(("pmap_steal_memory: size 0x%lx start %p end %p\n",
454 		    size, vstartp, vendp));
455 
456 	size = round_page(size);
457 	npgs = btoc(size);
458 
459 #ifdef DIAGNOSTIC
460 	if (uvm.page_init_done == true)
461 		panic("pmap_steal_memory: called _after_ bootstrap");
462 #endif
463 
464 	/*
465 	 * A VAX only has one segment of memory.
466 	 */
467 	bank = uvm_physseg_get_first();
468 
469 	v = (uvm_physseg_get_start(bank) << PGSHIFT) | KERNBASE;
470 	uvm_physseg_unplug(uvm_physseg_get_start(bank), npgs);
471 	memset((void *)v, 0, size);
472 	return v;
473 }
474 
475 /*
476  * pmap_init() is called as part of vm init after memory management
477  * is enabled. It is meant to do machine-specific allocations.
478  * Here the resource map for the user page tables is initialized.
479  */
480 void
481 pmap_init(void)
482 {
483 	/*
484 	 * Create the extent map used to manage the page table space.
485 	 */
486 	ptemap = extent_create("ptemap", ptemapstart, ptemapend,
487 	    ptmapstorage, PTMAPSZ, EX_NOCOALESCE);
488 	if (ptemap == NULL)
489 		panic("pmap_init");
490 }
491 
492 static u_long
493 pmap_extwrap(vsize_t nsize)
494 {
495 	int res;
496 	u_long rv;
497 
498 	for (;;) {
499 		res = extent_alloc(ptemap, nsize, PAGE_SIZE, 0,
500 		    EX_WAITOK|EX_MALLOCOK, &rv);
501 		if (res == EAGAIN)
502 			return 0;
503 		if (res == 0)
504 			return rv;
505 	}
506 }
507 
508 /*
509  * Do a page removal from the pv table. A page is identified by its
510  * virtual address combined with its struct pmap in the pv table.
511  */
512 static void
513 rmpage(pmap_t pm, int *br)
514 {
515 	struct pv_entry *pv, *pl, *pf;
516 	vaddr_t vaddr;
517 	int found = 0;
518 
519 	if (pm == pmap_kernel())
520 		vaddr = (br - (int *)Sysmap) * VAX_NBPG + 0x80000000;
521 	else if ((br >= (int *)pm->pm_p0br) &&
522 	    (br < ((int *)pm->pm_p0br + pm->pm_p0lr)))
523 		vaddr = (br - (int *)pm->pm_p0br) * VAX_NBPG;
524 	else
525 		vaddr = (br - (int *)pm->pm_p1br) * VAX_NBPG + 0x40000000;
526 
527 	if (IOSPACE_P((br[0] & PG_FRAME) << VAX_PGSHIFT))
528 		return; /* Forget mappings of IO space */
529 
530 	pv = pv_table + ((br[0] & PG_FRAME) >> LTOHPS);
531 	if (((br[0] & PG_PROT) == PG_RW) &&
532 	    ((pv->pv_attr & PG_M) != PG_M))
533 		pv->pv_attr |= br[0]|br[1]|br[2]|br[3]|br[4]|br[5]|br[6]|br[7];
534 	pmap_decrement_stats(pm, (br[0] & PG_W) != 0);
535 	if (pv->pv_pmap == pm && pv->pv_vaddr == vaddr) {
536 		pv->pv_vaddr = NOVADDR;
537 		pv->pv_pmap = 0;
538 		found++;
539 	} else
540 		for (pl = pv; pl->pv_next; pl = pl->pv_next) {
541 			if (pl->pv_next->pv_pmap != pm ||
542 			    pl->pv_next->pv_vaddr != vaddr)
543 				continue;
544 			pf = pl->pv_next;
545 			pl->pv_next = pl->pv_next->pv_next;
546 			free_pventry(pf);
547 			found++;
548 			break;
549 		}
550 	if (found == 0)
551 		panic("rmpage: pm %p br %p", pm, br);
552 }
553 /*
554  * Update the PCBs using this pmap after a change.
555  */
556 static void
557 update_pcbs(struct pmap *pm)
558 {
559 	struct pcb *pcb;
560 
561 	for (pcb = pm->pm_pcbs; pcb != NULL; pcb = pcb->pcb_pmnext) {
562 		KASSERT(pcb->pcb_pm == pm);
563 		pcb->P0BR = pm->pm_p0br;
564 		pcb->P0LR = pm->pm_p0lr | (pcb->P0LR & AST_MASK);
565 		pcb->P1BR = pm->pm_p1br;
566 		pcb->P1LR = pm->pm_p1lr;
567 
568 	}
569 
570 	/* If curlwp uses this pmap update the regs too */
571 	if (pm == curproc->p_vmspace->vm_map.pmap) {
572 		mtpr((uintptr_t)pm->pm_p0br, PR_P0BR);
573 		mtpr(pm->pm_p0lr, PR_P0LR);
574 		mtpr((uintptr_t)pm->pm_p1br, PR_P1BR);
575 		mtpr(pm->pm_p1lr, PR_P1LR);
576 	}
577 
578 #if defined(MULTIPROCESSOR) && defined(notyet)
579 	/* If someone else is using this pmap, be sure to reread */
580 	cpu_send_ipi(IPI_DEST_ALL, IPI_NEWPTE);
581 #endif
582 }
583 
584 /*
585  * Allocate a page through direct-mapped segment.
586  */
587 static vaddr_t
588 getpage(void)
589 {
590 	struct vm_page *pg;
591 
592 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
593 	if (pg == NULL)
594 		return 0;
595 	return (VM_PAGE_TO_PHYS(pg)|KERNBASE);
596 }
597 
598 #if 0
599 /*
600  * Free the page allocated above.
601  */
602 static void
603 freepage(vaddr_t v)
604 {
605 	paddr_t paddr = (kvtopte(v)->pg_pfn << VAX_PGSHIFT);
606 	uvm_pagefree(PHYS_TO_VM_PAGE(paddr));
607 }
608 #endif
609 
610 /*
611  * Remove a full process space. Update all processes' PCBs.
612  */
613 static void
614 rmspace(struct pmap *pm)
615 {
616 	int lr, i, j, *br, *ptpp;
617 
618 	if (pm->pm_p0lr == 0 && pm->pm_p1lr == NPTEPERREG)
619 		return; /* Already free */
620 
621 	lr = pm->pm_p0lr/NPTEPG;
622 	for (i = 0; i < lr; i++) {
623 		ptpp = (int *)kvtopte(&pm->pm_p0br[i*NPTEPG]);
624 		if (*ptpp == 0)
625 			continue;
626 		br = (int *)&pm->pm_p0br[i*NPTEPG];
627 		for (j = 0; j < NPTEPG; j+=LTOHPN) {
628 			if (br[j] == 0)
629 				continue;
630 			rmpage(pm, &br[j]);
631 		}
632 		free_ptp((((struct pte *)ptpp)->pg_pfn << VAX_PGSHIFT));
633 		*ptpp = 0;
634 	}
635 	lr = pm->pm_p1lr/NPTEPG;
636 	for (i = lr; i < NPTEPERREG/NPTEPG; i++) {
637 		ptpp = (int *)kvtopte(&pm->pm_p1br[i*NPTEPG]);
638 		if (*ptpp == 0)
639 			continue;
640 		br = (int *)&pm->pm_p1br[i*NPTEPG];
641 		for (j = 0; j < NPTEPG; j+=LTOHPN) {
642 			if (br[j] == 0)
643 				continue;
644 			rmpage(pm, &br[j]);
645 		}
646 		free_ptp((((struct pte *)ptpp)->pg_pfn << VAX_PGSHIFT));
647 		*ptpp = 0;
648 	}
649 
650 	if (pm->pm_p0lr != 0)
651 		extent_free(ptemap, (u_long)pm->pm_p0br,
652 		    pm->pm_p0lr * PPTESZ, EX_WAITOK);
653 	if (pm->pm_p1lr != NPTEPERREG)
654 		extent_free(ptemap, (u_long)pm->pm_p1ap,
655 		    (NPTEPERREG - pm->pm_p1lr) * PPTESZ, EX_WAITOK);
656 	pm->pm_p0br = pm->pm_p1br = (struct pte *)KERNBASE;
657 	pm->pm_p0lr = 0;
658 	pm->pm_p1lr = NPTEPERREG;
659 	pm->pm_p1ap = NULL;
660 	update_pcbs(pm);
661 }
662 
663 /*
664  * Find a process whose process space we can remove. *sigh*
665  * Avoid removing ourselves.
666  */
667 
668 static inline bool
669 pmap_vax_swappable(struct lwp *l, struct pmap *pm)
670 {
671 
672 	if (l->l_flag & (LW_SYSTEM | LW_WEXIT))
673 		return false;
674 	if (l->l_proc->p_vmspace->vm_map.pmap == pm)
675 		return false;
676 	if ((l->l_pflag & LP_RUNNING) != 0)
677 		return false;
678 	if (l->l_class != SCHED_OTHER)
679 		return false;
680 	if (l->l_syncobj == &rw_syncobj || l->l_syncobj == &mutex_syncobj)
681 		return false;
682 	if (l->l_proc->p_stat != SACTIVE && l->l_proc->p_stat != SSTOP)
683 		return false;
684 	return true;
685 }
686 
687 static int
688 pmap_rmproc(struct pmap *pm)
689 {
690 	struct pmap *ppm;
691 	struct lwp *l;
692 	struct lwp *outl, *outl2;
693 	int outpri, outpri2;
694 	int didswap = 0;
695 	extern int maxslp;
696 
697 	outl = outl2 = NULL;
698 	outpri = outpri2 = 0;
699 	mutex_enter(&proc_lock);
700 	LIST_FOREACH(l, &alllwp, l_list) {
701 		if (!pmap_vax_swappable(l, pm))
702 			continue;
703 		ppm = l->l_proc->p_vmspace->vm_map.pmap;
704 		if (ppm->pm_p0lr == 0 && ppm->pm_p1lr == NPTEPERREG)
705 			continue; /* Already swapped */
706 		switch (l->l_stat) {
707 		case LSRUN:
708 		case LSONPROC:
709 			if (l->l_swtime > outpri2) {
710 				outl2 = l;
711 				outpri2 = l->l_swtime;
712 			}
713 			continue;
714 		case LSSLEEP:
715 		case LSSTOP:
716 			if (l->l_slptime >= maxslp) {
717 				rmspace(l->l_proc->p_vmspace->vm_map.pmap);
718 				didswap++;
719 			} else if (l->l_slptime > outpri) {
720 				outl = l;
721 				outpri = l->l_slptime;
722 			}
723 			continue;
724 		}
725 	}
726 	mutex_exit(&proc_lock);
727 	if (didswap == 0) {
728 		if ((l = outl) == NULL)
729 			l = outl2;
730 		if (l) {
731 			rmspace(l->l_proc->p_vmspace->vm_map.pmap);
732 			didswap++;
733 		}
734 	}
735 	return didswap;
736 }
737 
738 /*
739  * Allocate space for user page tables, from ptemap.
740  * Argument is needed space, in bytes.
741  * Returns a pointer to the newly allocated space, or 0 if failed.
742  */
743 static vaddr_t
744 pmap_getusrptes(pmap_t pm, vsize_t nsize)
745 {
746 	u_long rv;
747 
748 #ifdef DEBUG
749 	if (nsize & PAGE_MASK)
750 		panic("pmap_getusrptes: bad size %lx", nsize);
751 #endif
752 	while (((rv = pmap_extwrap(nsize)) == 0) && (pmap_rmproc(pm) != 0))
753 		;
754 	return rv;
755 }
756 
757 /*
758  * Remove a pte page when all references are gone.
759  */
760 static void
761 rmptep(struct pte *pte)
762 {
763 	int *ptpp = (int *)kvtopte(pte);
764 #ifdef DEBUG
765 	{	int i, *ptr = (int *)vax_trunc_page(pte);
766 		for (i = 0; i < NPTEPG; i++)
767 			if (ptr[i] != 0)
768 				panic("rmptep: ptr[%d] != 0", i);
769 	}
770 #endif
771 	free_ptp((((struct pte *)ptpp)->pg_pfn << VAX_PGSHIFT));
772 	*ptpp = 0;
773 }
774 
775 static int
776 grow_p0(struct pmap *pm, int reqlen)
777 {
778 	vaddr_t nptespc;
779 	char *from, *to;
780 	int srclen, dstlen;
781 	int inuse, len, p0lr;
782 	u_long p0br;
783 
784 	PMDEBUG(("grow_p0: pmap %p reqlen %d\n", pm, reqlen));
785 
786 	/* Get new pte space */
787 	p0lr = pm->pm_p0lr;
788 	inuse = p0lr != 0;
789 	len = round_page((reqlen+1) * PPTESZ);
790 	PMAP_UNLOCK;
791 	nptespc = pmap_getusrptes(pm, len);
792 	PMAP_LOCK;
793 
794 	if (nptespc == 0)
795 		return 0;
796 	/*
797 	 * Copy the old ptes to the new space.
798 	 * Done by moving on system page table.
799 	 */
800 	srclen = vax_btop(p0lr * PPTESZ) * PPTESZ;
801 	dstlen = vax_btoc(len)*PPTESZ;
802 	from = (char *)kvtopte(pm->pm_p0br);
803 	to = (char *)kvtopte(nptespc);
804 
805 	PMDEBUG(("grow_p0: from %p to %p src %d dst %d\n",
806 	    from, to, srclen, dstlen));
807 
808 	if (inuse)
809 		memcpy(to, from, srclen);
810 	memset(to+srclen, 0, dstlen-srclen);
811 	p0br = (u_long)pm->pm_p0br;
812 	pm->pm_p0br = (struct pte *)nptespc;
813 	pm->pm_p0lr = (len/PPTESZ);
814 	update_pcbs(pm);
815 
816 	/* Remove the old after update_pcbs() (for multi-CPU propagation) */
817 	if (inuse)
818 		extent_free(ptemap, p0br, p0lr*PPTESZ, EX_WAITOK);
819 	return 1;
820 }
821 
822 
823 static int
824 grow_p1(struct pmap *pm, int len)
825 {
826 	vaddr_t nptespc, optespc;
827 	int nlen, olen;
828 
829 	PMDEBUG(("grow_p1: pm %p len %x\n", pm, len));
830 
831 	/* Get new pte space */
832 	nlen = (NPTEPERREG*PPTESZ) - trunc_page(len * PPTESZ);
833 	PMAP_UNLOCK;
834 	nptespc = pmap_getusrptes(pm, nlen);
835 	PMAP_LOCK;
836 	if (nptespc == 0)
837 		return 0;
838 
839 	olen = (NPTEPERREG*PPTESZ) - (pm->pm_p1lr * PPTESZ);
840 	optespc = (vaddr_t)pm->pm_p1ap;
841 
842 	/*
843 	 * Copy the old ptes to the new space.
844 	 * Done by moving on system page table.
845 	 */
846 	memset(kvtopte(nptespc), 0, vax_btop(nlen-olen) * PPTESZ);
847 	if (optespc)
848 		memcpy(kvtopte(nptespc+nlen-olen), kvtopte(optespc),
849 		    vax_btop(olen) * PPTESZ);
850 
851 	pm->pm_p1ap = (struct pte *)nptespc;
852 	pm->pm_p1br = (struct pte *)(nptespc+nlen-(NPTEPERREG*PPTESZ));
853 	pm->pm_p1lr = NPTEPERREG - nlen/PPTESZ;
854 	update_pcbs(pm);
855 
856 	if (optespc)
857 		extent_free(ptemap, optespc, olen, EX_WAITOK);
858 	return 1;
859 }
860 
861 /*
862  * Initialize a preallocated and zeroed pmap structure.
863  */
864 static void
865 pmap_pinit(pmap_t pmap)
866 {
867 
868 	/*
869 	 * Do not allocate any pte's here, we don't know the size and
870 	 * we'll get a page fault anyway when some page is referenced,
871 	 * so do it then.
872 	 */
873 	pmap->pm_p0br = (struct pte *)KERNBASE;
874 	pmap->pm_p1br = (struct pte *)KERNBASE;
875 	pmap->pm_p0lr = 0;
876 	pmap->pm_p1lr = NPTEPERREG;
877 	pmap->pm_p1ap = NULL;
878 
879 	PMDEBUG(("pmap_pinit(%p): p0br=%p p0lr=0x%lx p1br=%p p1lr=0x%lx\n",
880 	    pmap, pmap->pm_p0br, pmap->pm_p0lr, pmap->pm_p1br, pmap->pm_p1lr));
881 
882 	pmap->pm_count = 1;
883 	pmap->pm_stats.resident_count = pmap->pm_stats.wired_count = 0;
884 }
885 
886 /*
887  * pmap_create() creates a pmap for a new task.
888  * If not already allocated, allocate space for one.
889  */
890 struct pmap *
891 pmap_create(void)
892 {
893 	struct pmap *pmap;
894 
895 	pmap = kmem_zalloc(sizeof(*pmap), KM_SLEEP);
896 	pmap_pinit(pmap);
897 	return pmap;
898 }
899 
900 /*
901  * Release any resources held by the given physical map.
902  * Called when a pmap initialized by pmap_pinit is being released.
903  * Should only be called if the map contains no valid mappings.
904  */
905 static void
906 pmap_release(struct pmap *pmap)
907 {
908 #ifdef DEBUG
909 	vaddr_t saddr, eaddr;
910 #endif
911 
912 	PMDEBUG(("pmap_release: pmap %p\n",pmap));
913 
914 	if (pmap->pm_p0br == 0)
915 		return;
916 
917 #ifdef DEBUG
918 #if 0
919 	for (i = 0; i < NPTEPROCSPC; i++)
920 		if (pmap->pm_pref[i])
921 			panic("pmap_release: refcnt %d index %d",
922 			    pmap->pm_pref[i], i);
923 #endif
924 
925 	saddr = (vaddr_t)pmap->pm_p0br;
926 	eaddr = saddr + pmap->pm_p0lr * PPTESZ;
927 	for (; saddr < eaddr; saddr += PAGE_SIZE)
928 		if (kvtopte(saddr)->pg_pfn)
929 			panic("pmap_release: P0 page mapped");
930 	saddr = (vaddr_t)pmap->pm_p1br + pmap->pm_p1lr * PPTESZ;
931 	eaddr = KERNBASE;
932 	for (; saddr < eaddr; saddr += PAGE_SIZE)
933 		if (kvtopte(saddr)->pg_pfn)
934 			panic("pmap_release: P1 page mapped");
935 #endif
936 	if (pmap->pm_p0lr != 0)
937 		extent_free(ptemap, (u_long)pmap->pm_p0br,
938 		    pmap->pm_p0lr * PPTESZ, EX_WAITOK);
939 	if (pmap->pm_p1lr != NPTEPERREG)
940 		extent_free(ptemap, (u_long)pmap->pm_p1ap,
941 		    (NPTEPERREG - pmap->pm_p1lr) * PPTESZ, EX_WAITOK);
942 }
943 
944 /*
945  * pmap_destroy(pmap): Remove a reference from the pmap.
946  * Decrease pm_count, and if this was the last reference,
947  * call pmap_release to release this pmap.
948  */
949 
950 void
951 pmap_destroy(pmap_t pmap)
952 {
953 	PMDEBUG(("pmap_destroy: pmap %p\n",pmap));
954 
955 	if (atomic_dec_uint_nv(&pmap->pm_count) == 0) {
956 #ifdef DIAGNOSTIC
957 		if (pmap->pm_pcbs)
958 			panic("pmap_destroy used pmap");
959 #endif
960 		pmap_release(pmap);
961 		kmem_free(pmap, sizeof(*pmap));
962 	}
963 }
964 
965 static struct pte *
966 vaddrtopte(const struct pv_entry *pv)
967 {
968 	struct pmap *pm;
969 	if (pv->pv_pmap == NULL || pv->pv_vaddr == NOVADDR)
970 		return NULL;
971 	if (pv->pv_vaddr & KERNBASE)
972 		return &Sysmap[(pv->pv_vaddr & ~KERNBASE) >> VAX_PGSHIFT];
973 	pm = pv->pv_pmap;
974 	if (pv->pv_vaddr & 0x40000000)
975 		return &pm->pm_p1br[vax_btop(pv->pv_vaddr & ~0x40000000)];
976 	else
977 		return &pm->pm_p0br[vax_btop(pv->pv_vaddr)];
978 }
979 
980 /*
981  * New (real nice!) function that enters mappings in kernel space
982  * without tracking them in the pv table.
983  */
984 void
985 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
986 {
987 	int *ptp, opte;
988 
989 	ptp = (int *)kvtopte(va);
990 	PMDEBUG(("pmap_kenter_pa: va: %lx, pa %lx, prot %x ptp %p\n",
991 	    va, pa, prot, ptp));
992 	opte = ptp[0];
993 	ptp[0] = PG_V | ((prot & VM_PROT_WRITE)? PG_KW : PG_KR) |
994 	    PG_PFNUM(pa) | PG_SREF;
995 	ptp[1] = ptp[0] + 1;
996 	ptp[2] = ptp[0] + 2;
997 	ptp[3] = ptp[0] + 3;
998 	ptp[4] = ptp[0] + 4;
999 	ptp[5] = ptp[0] + 5;
1000 	ptp[6] = ptp[0] + 6;
1001 	ptp[7] = ptp[0] + 7;
1002 	if (opte & PG_V) {
1003 #if defined(MULTIPROCESSOR)
1004 		cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
1005 #endif
1006 		mtpr(0, PR_TBIA);
1007 	}
1008 }
1009 
1010 void
1011 pmap_kremove(vaddr_t va, vsize_t len)
1012 {
1013 	struct pte *pte;
1014 #ifdef PMAPDEBUG
1015 	int i;
1016 #endif
1017 
1018 	PMDEBUG(("pmap_kremove: va: %lx, len %lx, ptp %p\n",
1019 		    va, len, kvtopte(va)));
1020 
1021 	pte = kvtopte(va);
1022 
1023 #ifdef PMAPDEBUG
1024 	/*
1025 	 * Check if any pages are on the pv list.
1026 	 * This shouldn't happen anymore.
1027 	 */
1028 	len >>= PGSHIFT;
1029 	for (i = 0; i < len; i++) {
1030 		if (pte->pg_pfn == 0)
1031 			continue;
1032 		if (pte->pg_sref == 0)
1033 			panic("pmap_kremove");
1034 		memset(pte, 0, LTOHPN * sizeof(struct pte));
1035 		pte += LTOHPN;
1036 	}
1037 #else
1038 	len >>= VAX_PGSHIFT;
1039 	memset(pte, 0, len * sizeof(struct pte));
1040 #endif
1041 #if defined(MULTIPROCESSOR)
1042 	cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
1043 #endif
1044 	mtpr(0, PR_TBIA);
1045 }
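
#if 0
/*
 * Illustrative sketch only (not part of this file): the usual pairing of
 * pmap_kenter_pa()/pmap_kremove() for a temporary, unmanaged kernel
 * mapping.  The kva is assumed to come from a kernel VA allocator such as
 * uvm_km_alloc(); pa is whatever physical address is to be mapped.
 */
static void
kenter_example(vaddr_t kva, paddr_t pa)
{
	/* Enter a read/write mapping; no pv tracking is done for it. */
	pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);

	/* ... use the memory at kva ... */

	/* Tear the mapping down again; the length is in bytes. */
	pmap_kremove(kva, PAGE_SIZE);
}
#endif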
1046 
1047 /*
1048  * pmap_enter() is the main routine that puts in mappings for pages, or
1049  * upgrades mappings to more "rights".
1050  */
1051 int
1052 pmap_enter(pmap_t pmap, vaddr_t v, paddr_t p, vm_prot_t prot, u_int flags)
1053 {
1054 	struct pv_entry *pv, *tmp;
1055 	int newpte, oldpte;
1056 	int *pteptr;	/* current pte to write mapping info to */
1057 	int *ptpptr;	/* ptr to page table page */
1058 
1059 
1060 	PMDEBUG(("pmap_enter: pmap %p v %lx p %lx prot %x wired %d access %x\n",
1061 	    pmap, v, p, prot, (flags & PMAP_WIRED) != 0, flags & VM_PROT_ALL));
1062 
1063 	PMAP_LOCK;
1064 
1065 	/* Find address of correct pte */
1066 	switch (SEGTYPE(v)) {
1067 	case SYSSEG:
1068 		pteptr = ((int *)Sysmap) + vax_btop(v - KERNBASE);
1069 		newpte = (prot & VM_PROT_WRITE ? PG_KW : PG_KR);
1070 		break;
1071 
1072 	case P0SEG:
1073 		if (vax_btop(v) >= pmap->pm_p0lr)
1074 			if (grow_p0(pmap, vax_btop(v)) == 0)
1075 				goto growfail;
1076 		pteptr = (int *)pmap->pm_p0br + vax_btop(v);
1077 		newpte = (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
1078 		break;
1079 
1080 	case P1SEG:
1081 		if (vax_btop(v - 0x40000000) < pmap->pm_p1lr)
1082 			if (grow_p1(pmap, vax_btop(v - 0x40000000)) == 0)
1083 				goto growfail;
1084 		pteptr = (int *)pmap->pm_p1br + vax_btop(v - 0x40000000);
1085 		newpte = (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
1086 		break;
1087 	default:
1088 		panic("bad seg");
1089 	}
1090 	newpte |= vax_btop(p);
1091 
1092 	if (SEGTYPE(v) != SYSSEG) {
1093 		/*
1094 		 * Check if a pte page must be mapped in.
1095 		 */
1096 		ptpptr = (int *)kvtopte(pteptr);
1097 
1098 		if (*ptpptr == 0) {
1099 			paddr_t phys;
1100 
1101 			phys = get_ptp();
1102 			if (phys == 0) {
1103 				PMAP_UNLOCK;
1104 				if ((flags & PMAP_CANFAIL) != 0)
1105 					return ENOMEM;
1106 				panic("pmap_enter: out of memory");
1107 			}
1108 			*ptpptr = PG_V | PG_KW | PG_PFNUM(phys);
1109 		}
1110 	}
1111 
1112 	/*
1113 	 * Do not keep track of anything if mapping IO space.
1114 	 */
1115 	if (IOSPACE_P(p)) {
1116 		mapin8(pteptr, newpte);
1117 		PMAP_UNLOCK;
1118 		return 0;
1119 	}
1120 
1121 	if (flags & PMAP_WIRED)
1122 		newpte |= PG_W;
1123 
1124 	oldpte = *pteptr & ~(PG_V|PG_M);
1125 	pv = pv_table + (p >> PGSHIFT);
1126 
1127 	/* just a wiring change? */
1128 	if (newpte == (oldpte | PG_W)) {
1129 		*pteptr |= PG_W;
1130 		pmap->pm_stats.wired_count++;
1131 		PMAP_UNLOCK;
1132 		return 0;
1133 	}
1134 
1135 	/* mapping unchanged? just return. */
1136 	if (newpte == oldpte) {
1137 		PMAP_UNLOCK;
1138 		return 0;
1139 	}
1140 
1141 	/* Changing mapping? */
1142 
1143 	if ((newpte & PG_FRAME) == (oldpte & PG_FRAME)) {
1144 		/* prot change. resident_count will be increased later */
1145 		pmap_decrement_stats(pmap, (oldpte & PG_W) != 0);
1146 	} else {
1147 
1148 		/*
1149 		 * Mapped before? Remove it then.
1150 		 */
1151 
1152 		if (oldpte & PG_FRAME) {
1153 			if ((oldpte & PG_SREF) == 0)
1154 				rmpage(pmap, pteptr);
1155 			else {
1156 				PMAP_UNLOCK;
1157 				panic("pmap_enter on PG_SREF page");
1158 			}
1159 		}
1160 
1161 		if (pv->pv_pmap == NULL) {
1162 			pv->pv_vaddr = v;
1163 			pv->pv_pmap = pmap;
1164 		} else {
1165 			tmp = get_pventry();
1166 			tmp->pv_vaddr = v;
1167 			tmp->pv_pmap = pmap;
1168 			tmp->pv_next = pv->pv_next;
1169 			pv->pv_next = tmp;
1170 		}
1171 	}
1172 	pmap->pm_stats.resident_count++;
1173 	if ((flags & PMAP_WIRED) != 0)
1174 		pmap->pm_stats.wired_count++;
1175 
1176 	if (flags & (VM_PROT_READ|VM_PROT_WRITE)) {
1177 		pv->pv_attr |= PG_V;
1178 		newpte |= PG_V;
1179 	}
1180 	if (flags & VM_PROT_WRITE)
1181 		pv->pv_attr |= PG_M;
1182 
1183 	if (flags & PMAP_WIRED)
1184 		newpte |= PG_V; /* Not allowed to be invalid */
1185 
1186 	mapin8(pteptr, newpte);
1187 
1188 	if (pventries < 10)
1189 		more_pventries();
1190 
1191 	PMAP_UNLOCK;
1192 
1193 	mtpr(0, PR_TBIA); /* Always; safety belt */
1194 	return 0;
1195 
1196 growfail:
1197 	if (flags & PMAP_CANFAIL)
1198 		return ENOMEM;
1199 	panic("usrptmap space leakage");
1200 }
1201 
1202 vaddr_t
1203 pmap_map(vaddr_t virtual, paddr_t pstart, paddr_t pend, int prot)
1204 {
1205 	vaddr_t count;
1206 	int *pentry;
1207 
1208 	PMDEBUG(("pmap_map: virt %lx, pstart %lx, pend %lx, Sysmap %p\n",
1209 	    virtual, pstart, pend, Sysmap));
1210 
1211 	pstart &= 0x7fffffffUL;
1212 	pend &= 0x7fffffffUL;
1213 	virtual &= 0x7fffffffUL;
1214 	pentry = &((int *)Sysmap)[virtual >> VAX_PGSHIFT];
1215 	for (count = pstart; count < pend; count += VAX_NBPG) {
1216 		*pentry++ = (count >> VAX_PGSHIFT)|PG_V|
1217 		    (prot & VM_PROT_WRITE ? PG_KW : PG_KR);
1218 	}
1219 	return virtual + (count - pstart) + KERNBASE;
1220 }
1221 
1222 #if 0
1223 bool
1224 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1225 {
1226 	paddr_t pa = 0;
1227 	int	*pte, sva;
1228 
1229 	PMDEBUG(("pmap_extract: pmap %p, va %lx\n",pmap, va));
1230 
1231 	if (va & KERNBASE) {
1232 		pa = kvtophys(va); /* Is 0 if not mapped */
1233 		if (pap)
1234 			*pap = pa;
1235 		if (pa)
1236 			return (true);
1237 		return (false);
1238 	}
1239 
1240 	sva = PG_PFNUM(va);
1241 	if (va < 0x40000000) {
1242 		if (sva > pmap->pm_p0lr)
1243 			return false;
1244 		pte = (int *)pmap->pm_p0br;
1245 	} else {
1246 		if (sva < pmap->pm_p1lr)
1247 			return false;
1248 		pte = (int *)pmap->pm_p1br;
1249 	}
1250 	if (kvtopte(&pte[sva])->pg_pfn) {
1251 		if (pap)
1252 			*pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
1253 		return (true);
1254 	}
1255 	return (false);
1256 }
1257 #endif
1258 /*
1259  * Sets protection for a given region to prot. If prot == none then
1260  * unmap region. pmap_remove is implemented as pmap_protect with
1261  * protection none.
1262  */
1263 void
1264 pmap_protect_long(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
1265 {
1266 	struct	pte *pt, *pts, *ptd;
1267 	int	pr, lr;
1268 
1269 	PMDEBUG(("pmap_protect: pmap %p, start %lx, end %lx, prot %x\n",
1270 	    pmap, start, end,prot));
1271 
1272 	PMAP_LOCK;
1273 
1274 	switch (SEGTYPE(start)) {
1275 	case SYSSEG:
1276 		pt = Sysmap;
1277 #ifdef DIAGNOSTIC
1278 		if (((end & 0x3fffffff) >> VAX_PGSHIFT) > mfpr(PR_SLR))
1279 			panic("pmap_protect: outside SLR: %lx", end);
1280 #endif
1281 		start &= ~KERNBASE;
1282 		end &= ~KERNBASE;
1283 		pr = (prot & VM_PROT_WRITE ? PROT_KW : PROT_KR);
1284 		break;
1285 
1286 	case P1SEG:
1287 		if (vax_btop(end - 0x40000000) <= pmap->pm_p1lr) {
1288 			PMAP_UNLOCK;
1289 			return;
1290 		}
1291 		if (vax_btop(start - 0x40000000) < pmap->pm_p1lr)
1292 			start = pmap->pm_p1lr * VAX_NBPG;
1293 		pt = pmap->pm_p1br;
1294 		start &= 0x3fffffff;
1295 		end = (end == KERNBASE ? end >> 1 : end & 0x3fffffff);
1296 		pr = (prot & VM_PROT_WRITE ? PROT_RW : PROT_RO);
1297 		break;
1298 
1299 	case P0SEG:
1300 		lr = pmap->pm_p0lr;
1301 
1302 		/* Anything to care about at all? */
1303 		if (vax_btop(start) > lr) {
1304 			PMAP_UNLOCK;
1305 			return;
1306 		}
1307 		if (vax_btop(end) > lr)
1308 			end = lr * VAX_NBPG;
1309 		pt = pmap->pm_p0br;
1310 		pr = (prot & VM_PROT_WRITE ? PROT_RW : PROT_RO);
1311 		break;
1312 	default:
1313 		panic("unsupported segtype: %d", SEGTYPE(start));
1314 	}
1315 
1316 	pts = &pt[start >> VAX_PGSHIFT];
1317 	ptd = &pt[end >> VAX_PGSHIFT];
1318 #ifdef DEBUG
1319 	if (((int)pts - (int)pt) & 7)
1320 		panic("pmap_remove: pts not even");
1321 	if (((int)ptd - (int)pt) & 7)
1322 		panic("pmap_remove: ptd not even");
1323 #endif
1324 
1325 	while (pts < ptd) {
1326 		if (kvtopte(pts)->pg_pfn && *(int *)pts) {
1327 			if (prot == VM_PROT_NONE) {
1328 				if ((*(int *)pts & PG_SREF) == 0)
1329 					rmpage(pmap, (u_int *)pts);
1330 #ifdef DEBUG
1331 				else {
1332 					PMAP_UNLOCK;
1333 					panic("pmap_remove PG_SREF page");
1334 				}
1335 #endif
1336 				memset(pts, 0, sizeof(struct pte) * LTOHPN);
1337 				if (pt != Sysmap) {
1338 					if (ptpinuse(pts) == 0)
1339 						rmptep(pts);
1340 				}
1341 			} else {
1342 				pts[0].pg_prot = pr;
1343 				pts[1].pg_prot = pr;
1344 				pts[2].pg_prot = pr;
1345 				pts[3].pg_prot = pr;
1346 				pts[4].pg_prot = pr;
1347 				pts[5].pg_prot = pr;
1348 				pts[6].pg_prot = pr;
1349 				pts[7].pg_prot = pr;
1350 			}
1351 		}
1352 		pts += LTOHPN;
1353 	}
1354 	PMAP_UNLOCK;
1355 #ifdef MULTIPROCESSOR
1356 	cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
1357 #endif
1358 	mtpr(0, PR_TBIA);
1359 }
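
/*
 * As the comment above notes, removal is just protection to none.  The
 * wrappers live in the machine-dependent pmap.h; roughly (illustrative
 * form only), a pmap_remove(pm, start, end) call boils down to
 *
 *	pmap_protect(pm, start, end, VM_PROT_NONE);
 */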
1360 
1361 int pmap_simulref(int bits, int addr);
1362 
1363 /*
1364  * Called from interrupt vector routines if we get a page invalid fault.
1365  * Note: the save mask must be or'ed with 0x3f for this function.
1366  * Returns 0 if normal call, 1 if CVAX bug detected.
1367  */
1368 int
1369 pmap_simulref(int bits, int addr)
1370 {
1371 	u_int	*pte;
1372 	struct	pv_entry *pv;
1373 	paddr_t pa;
1374 
1375 	PMDEBUG(("pmap_simulref: bits %x addr %x\n", bits, addr));
1376 
1377 #ifdef DEBUG
1378 	if (bits & 1)
1379 		panic("pte trans len");
1380 #endif
1381 	/* Set address on logical page boundary */
1382 	addr &= ~PGOFSET;
1383 	/* First decode userspace addr */
1384 	if (addr >= 0) {
1385 		if ((addr << 1) < 0)
1386 			pte = (u_int *)mfpr(PR_P1BR);
1387 		else
1388 			pte = (u_int *)mfpr(PR_P0BR);
1389 		pte += PG_PFNUM(addr);
1390 		if (bits & 2) { /* PTE reference */
1391 			pte = (u_int *)kvtopte(vax_trunc_page(pte));
1392 			if (pte[0] == 0) /* Check for CVAX bug */
1393 				return 1;
1394 			panic("pmap_simulref");
1395 			pa = (u_int)pte & ~KERNBASE;
1396 		} else
1397 			pa = Sysmap[PG_PFNUM(pte)].pg_pfn << VAX_PGSHIFT;
1398 	} else {
1399 		pte = (u_int *)kvtopte(addr);
1400 		pa = (u_int)pte & ~KERNBASE;
1401 	}
1402 	pte[0] |= PG_V;
1403 	pte[1] |= PG_V;
1404 	pte[2] |= PG_V;
1405 	pte[3] |= PG_V;
1406 	pte[4] |= PG_V;
1407 	pte[5] |= PG_V;
1408 	pte[6] |= PG_V;
1409 	pte[7] |= PG_V;
1410 	if (!IOSPACE_P(pa)) { /* No pv_table fiddling in iospace */
1411 		PMAP_LOCK;
1412 		pv = pv_table + (pa >> PGSHIFT);
1413 		pv->pv_attr |= PG_V; /* Referenced */
1414 		if (bits & 4) /* (will be) modified. XXX page tables  */
1415 			pv->pv_attr |= PG_M;
1416 		PMAP_UNLOCK;
1417 	}
1418 	return 0;
1419 }
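
/*
 * Note: the VAX has no hardware referenced bit.  Reference emulation works
 * by leaving PG_V clear in the PTEs (see pmap_clear_reference() below), so
 * that the next access faults; pmap_simulref() then sets PG_V in the eight
 * hardware PTEs and records the reference (and possibly the coming
 * modification) in the page's pv_attr.
 */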
1420 
1421 /*
1422  * Clears valid bit in all ptes referenced to this physical page.
1423  */
1424 bool
1425 pmap_clear_reference(struct vm_page *pg)
1426 {
1427 	struct pv_entry *pv = pmap_pg_to_pv(pg);
1428 	struct pte *pte;
1429 	bool ref;
1430 
1431 	PMDEBUG(("pmap_clear_reference: pv_entry %p\n", pv));
1432 
1433 	PMAP_LOCK;
1434 	ref = ISSET(pv->pv_attr, PG_V);
1435 	CLR(pv->pv_attr, PG_V);
1436 	if (pv->pv_pmap != NULL) do {
1437 		pte = vaddrtopte(pv);
1438 		if (pte[0].pg_w == 0) {
1439 			pte[0].pg_v = 0; pte[1].pg_v = 0;
1440 			pte[2].pg_v = 0; pte[3].pg_v = 0;
1441 			pte[4].pg_v = 0; pte[5].pg_v = 0;
1442 			pte[6].pg_v = 0; pte[7].pg_v = 0;
1443 		}
1444 	} while ((pv = pv->pv_next) != NULL);
1445 	PMAP_UNLOCK;
1446 #ifdef MULTIPROCESSOR
1447 	cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
1448 #endif
1449 	mtpr(0, PR_TBIA);
1450 	return ref;
1451 }
1452 
1453 /*
1454  * Checks if page is modified; returns true or false depending on result.
1455  */
1456 bool
1457 pmap_is_modified(struct vm_page *pg)
1458 {
1459 	struct pv_entry *pv = pmap_pg_to_pv(pg);
1460 	bool rv;
1461 
1462 	PMDEBUG(("pmap_is_modified: pv_entry %p ", pv));
1463 
1464 	PMAP_LOCK;
1465 	rv = ISSET(pv->pv_attr, PG_M);
1466 	if (rv == false && pv->pv_pmap != NULL) do {
1467 		const struct pte * const pte = vaddrtopte(pv);
1468 		if (pte[0].pg_m | pte[1].pg_m | pte[2].pg_m | pte[3].pg_m
1469 		    | pte[4].pg_m | pte[5].pg_m | pte[6].pg_m | pte[7].pg_m) {
1470 			rv = true;
1471 			break;
1472 		}
1473 	} while ((pv = pv->pv_next) != NULL);
1474 	PMAP_UNLOCK;
1475 	return rv;
1476 }
1477 
1478 /*
1479  * Clears modify bit in all ptes referenced to this physical page.
1480  */
1481 bool
1482 pmap_clear_modify(struct vm_page *pg)
1483 {
1484 	struct pv_entry *pv = pmap_pg_to_pv(pg);
1485 	bool rv = false;
1486 
1487 	PMDEBUG(("pmap_clear_modify: pv_entry %p\n", pv));
1488 
1489 	PMAP_LOCK;
1490 	rv = ISSET(pv->pv_attr, PG_M);
1491 	CLR(pv->pv_attr, PG_M);
1492 	if (pv->pv_pmap != NULL) do {
1493 		struct pte * const pte = vaddrtopte(pv);
1494 		if (pte[0].pg_m | pte[1].pg_m | pte[2].pg_m | pte[3].pg_m |
1495 		    pte[4].pg_m | pte[5].pg_m | pte[6].pg_m | pte[7].pg_m) {
1496 			rv = true;
1497 		}
1498 		pte[0].pg_m = pte[1].pg_m = pte[2].pg_m = pte[3].pg_m = 0;
1499 		pte[4].pg_m = pte[5].pg_m = pte[6].pg_m = pte[7].pg_m = 0;
1500 	} while ((pv = pv->pv_next) != NULL);
1501 	PMAP_UNLOCK;
1502 	return rv;
1503 }
1504 
1505 /*
1506  * Lower the permission for all mappings to a given page.
1507  * Lower permission can only mean setting protection to either read-only
1508  * or none, where none means unmapping the page.
1509  */
1510 void
1511 pmap_page_protect_long(struct pv_entry *pv, vm_prot_t prot)
1512 {
1513 	struct	pte *pt;
1514 	struct	pv_entry *opv, *pl;
1515 	int	*g;
1516 
1517 	PMDEBUG(("pmap_page_protect: pv %p, prot %x\n", pv, prot));
1518 
1519 	if (prot == VM_PROT_ALL) /* 'cannot happen' */
1520 		return;
1521 
1522 	PMAP_LOCK;
1523 	if (prot == VM_PROT_NONE) {
1524 		g = (int *)vaddrtopte(pv);
1525 		if (g) {
1526 			pmap_decrement_stats(pv->pv_pmap, (g[0] & PG_W) != 0);
1527 			if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
1528 				pv->pv_attr |=
1529 				    g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
1530 			memset(g, 0, sizeof(struct pte) * LTOHPN);
1531 			if (pv->pv_pmap != pmap_kernel()) {
1532 				if (ptpinuse(g) == 0)
1533 					rmptep((void *)g);
1534 			}
1535 			pv->pv_vaddr = NOVADDR;
1536 			pv->pv_pmap = NULL;
1537 		}
1538 		pl = pv->pv_next;
1539 		pv->pv_pmap = 0;
1540 		pv->pv_next = 0;
1541 		while (pl) {
1542 			g = (int *)vaddrtopte(pl);
1543 			pmap_decrement_stats(pl->pv_pmap, (g[0] & PG_W) != 0);
1544 			if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
1545 				pv->pv_attr |=
1546 				    g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
1547 			memset(g, 0, sizeof(struct pte) * LTOHPN);
1548 			if (pl->pv_pmap != pmap_kernel()) {
1549 				if (ptpinuse(g) == 0)
1550 					rmptep((void *)g);
1551 			}
1552 			opv = pl;
1553 			pl = pl->pv_next;
1554 			free_pventry(opv);
1555 		}
1556 	} else { /* read-only */
1557 		do {
1558 			int pr;
1559 			pt = vaddrtopte(pv);
1560 			if (pt == 0)
1561 				continue;
1562 			pr = ((vaddr_t)pt < ptemapstart ? PROT_KR : PROT_RO);
1563 			pt[0].pg_prot = pr; pt[1].pg_prot = pr;
1564 			pt[2].pg_prot = pr; pt[3].pg_prot = pr;
1565 			pt[4].pg_prot = pr; pt[5].pg_prot = pr;
1566 			pt[6].pg_prot = pr; pt[7].pg_prot = pr;
1567 		} while ((pv = pv->pv_next));
1568 	}
1569 	PMAP_UNLOCK;
1570 #ifdef MULTIPROCESSOR
1571 	cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
1572 #endif
1573 	mtpr(0, PR_TBIA);
1574 }
1575 
1576 static void
1577 pmap_remove_pcb(struct pmap *pm, struct pcb *thispcb)
1578 {
1579 	struct pcb *pcb, **pcbp;
1580 
1581 	for (pcbp = &pm->pm_pcbs;
1582 	     (pcb = *pcbp) != NULL;
1583 	     pcbp = &pcb->pcb_pmnext) {
1584 #ifdef DIAGNOSTIC
1585 		if (pcb->pcb_pm != pm)
1586 			panic("pmap_remove_pcb: pcb %p (pm %p) not owned by pmap %p",
1587 			    pcb, pcb->pcb_pm, pm);
1588 #endif
1589 		if (pcb == thispcb) {
1590 			*pcbp = pcb->pcb_pmnext;
1591 			thispcb->pcb_pm = NULL;
1592 			return;
1593 		}
1594 	}
1595 #ifdef DIAGNOSTIC
1596 	panic("pmap_remove_pcb: pmap %p: pcb %p not in list", pm, thispcb);
1597 #endif
1598 }
1599 
1600 /*
1601  * Activate the address space for the specified process.
1602  * Note that if the process to activate is the current process, then
1603  * the processor internal registers must also be loaded; otherwise
1604  * the current process will have wrong pagetables.
1605  */
1606 void
1607 pmap_activate(struct lwp *l)
1608 {
1609 	struct pcb * const pcb = lwp_getpcb(l);
1610 	struct pmap * const pmap = l->l_proc->p_vmspace->vm_map.pmap;
1611 
1612 	PMDEBUG(("pmap_activate: l %p\n", l));
1613 
1614 	pcb->P0BR = pmap->pm_p0br;
1615 	pcb->P0LR = pmap->pm_p0lr|AST_PCB;
1616 	pcb->P1BR = pmap->pm_p1br;
1617 	pcb->P1LR = pmap->pm_p1lr;
1618 
1619 	if (pcb->pcb_pm != pmap) {
1620 		if (pcb->pcb_pm != NULL)
1621 			pmap_remove_pcb(pcb->pcb_pm, pcb);
1622 		pcb->pcb_pmnext = pmap->pm_pcbs;
1623 		pmap->pm_pcbs = pcb;
1624 		pcb->pcb_pm = pmap;
1625 	}
1626 
1627 	if (l == curlwp) {
1628 		mtpr((uintptr_t)pmap->pm_p0br, PR_P0BR);
1629 		mtpr(pmap->pm_p0lr|AST_PCB, PR_P0LR);
1630 		mtpr((uintptr_t)pmap->pm_p1br, PR_P1BR);
1631 		mtpr(pmap->pm_p1lr, PR_P1LR);
1632 		mtpr(0, PR_TBIA);
1633 	}
1634 }
1635 
1636 void
1637 pmap_deactivate(struct lwp *l)
1638 {
1639 	struct pcb * const pcb = lwp_getpcb(l);
1640 	struct pmap * const pmap = l->l_proc->p_vmspace->vm_map.pmap;
1641 
1642 	PMDEBUG(("pmap_deactivate: l %p\n", l));
1643 
1644 	if (pcb->pcb_pm == NULL)
1645 		return;
1646 #ifdef DIAGNOSTIC
1647 	if (pcb->pcb_pm != pmap)
1648 		panic("pmap_deactivate: lwp %p pcb %p not owned by pmap %p",
1649 		    l, pcb, pmap);
1650 #endif
1651 	pmap_remove_pcb(pmap, pcb);
1652 }
1653 
1654 /*
1655  * Remove the wired bit from a bunch of PTEs.
1656  */
1657 void
1658 pmap_unwire(pmap_t pmap, vaddr_t v)
1659 {
1660 	int *pte;
1661 
1662 	PMDEBUG(("pmap_unwire: pmap %p v %lx\n", pmap, v));
1663 
1664 	PMAP_LOCK;
1665 	if (v & KERNBASE) {
1666 		pte = (int *)kvtopte(v);
1667 	} else {
1668 		if (v < 0x40000000)
1669 			pte = (int *)&pmap->pm_p0br[PG_PFNUM(v)];
1670 		else
1671 			pte = (int *)&pmap->pm_p1br[PG_PFNUM(v)];
1672 	}
1673 	pte[0] &= ~PG_W;
1674 	pmap->pm_stats.wired_count--;
1675 	PMAP_UNLOCK;
1676 }
1677 
1678 /*
1679  * pv_entry functions.
1680  */
1681 struct pv_entry *pv_list;
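
/*
 * pv entries are kept on a simple freelist headed by pv_list:
 * get_pventry() pops one, free_pventry() pushes one back, and
 * more_pventries() refills the list with a page worth of entries when
 * pmap_enter() notices it is running low.  pventries and pvinuse count
 * the free and in-use entries.
 */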
1682 
1683 /*
1684  * get_pventry().
1685  * The pv_table lock must be held before calling this.
1686  */
1687 struct pv_entry *
1688 get_pventry(void)
1689 {
1690 	struct pv_entry *tmp;
1691 
1692 	if (pventries == 0)
1693 		panic("get_pventry");
1694 
1695 	tmp = pv_list;
1696 	pv_list = tmp->pv_next;
1697 	pventries--;
1698 	pvinuse++;
1699 	return tmp;
1700 }
1701 
1702 /*
1703  * free_pventry().
1704  * The pv_table lock must be held before calling this.
1705  */
1706 void
1707 free_pventry(struct pv_entry *pv)
1708 {
1709 	pv->pv_next = pv_list;
1710 	pv_list = pv;
1711 	pventries++;
1712 	pvinuse--;
1713 }
1714 
1715 /*
1716  * more_pventries().
1717  * The pmap_lock must be held before calling this.
1718  */
1719 void
1720 more_pventries(void)
1721 {
1722 	struct pv_entry *pv;
1723 	int i, count;
1724 
1725 	pv = (struct pv_entry *)getpage();
1726 	if (pv == NULL)
1727 		return;
1728 	count = PAGE_SIZE/sizeof(struct pv_entry);
1729 
1730 	for (i = 0; i < count - 1; i++)
1731 		pv[i].pv_next = &pv[i + 1];
1732 
1733 	pv[count - 1].pv_next = pv_list;
1734 	pv_list = pv;
1735 	pventries += count;
1736 }
1737 
1738 static int *ptpp;
1739 
1740 /*
1741  * Get a (vax-size) page, to use for page tables.
1742  */
1743 vaddr_t
1744 get_ptp(void)
1745 {
1746 	int *a;
1747 
1748 	if ((a = ptpp)) {
1749 		ptpp = (int *)*ptpp;
1750 		memset(a, 0, VAX_NBPG);
1751 		return (vaddr_t)a;
1752 	}
1753 	a = (int *)getpage();
1754 	if (a != NULL) {
1755 		a[128] = (int)&a[256];
1756 		a[256] = (int)&a[384];
1757 		a[384] = (int)&a[512];
1758 		a[512] = (int)&a[640];
1759 		a[640] = (int)&a[768];
1760 		a[768] = (int)&a[896];
1761 		a[896] = (int)ptpp;
1762 		ptpp = &a[128];
1763 	}
1764 	return (vaddr_t)a;
1765 }
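
/*
 * Layout note for get_ptp()/free_ptp(): a fresh 4 KB logical page from
 * getpage() is carved into eight 512-byte VAX pages.  The first chunk is
 * returned to the caller and the remaining seven are threaded onto the
 * ptpp freelist through their first word (the int indices 128, 256, ...
 * above are exactly the 512-byte boundaries).  free_ptp() pushes a chunk
 * back using its direct-mapped address.
 */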
1766 
1767 /*
1768  * Put a page table page on the free list.
1769  * The address v is in the direct-mapped area.
1770  */
1771 void
1772 free_ptp(paddr_t v)
1773 {
1774 	v |= KERNBASE;
1775 	*(int *)v = (int)ptpp;
1776 	ptpp = (int *)v;
1777 }
1778