#include "param.h"
#include "types.h"
#include "defs.h"
#include "x86.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "elf.h"

extern char data[];  // defined by kernel.ld
pde_t *kpgdir;  // for use in scheduler()
struct segdesc gdt[NSEGS];

// Set up CPU's kernel segment descriptors.
// Run once on entry on each CPU.
void
seginit(void)
{
  struct cpu *c;

  // Map "logical" addresses to virtual addresses using identity map.
  // Cannot share a CODE descriptor for both kernel and user
  // because it would have to have DPL_USER, but the CPU forbids
  // an interrupt from CPL=0 to DPL=3.
  c = &cpus[cpunum()];
  c->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, 0);
  c->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
  c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, DPL_USER);
  c->gdt[SEG_UDATA] = SEG(STA_W, 0, 0xffffffff, DPL_USER);

  // Map cpu and proc, the per-CPU variables reached through %gs below.
  c->gdt[SEG_KCPU] = SEG(STA_W, &c->cpu, 8, 0);

  lgdt(c->gdt, sizeof(c->gdt));
  loadgs(SEG_KCPU << 3);

  // Initialize cpu-local storage.
  cpu = c;
  proc = 0;
}

// Return the address of the PTE in page table pgdir
// that corresponds to virtual address va.  If alloc != 0,
// create any required page table pages.
static pte_t*
walkpgdir(pde_t *pgdir, const void *va, char* (*alloc)(void))
{
  pde_t *pde;
  pte_t *pgtab;

  pde = &pgdir[PDX(va)];
  if(*pde & PTE_P){
    pgtab = (pte_t*)p2v(PTE_ADDR(*pde));
  } else {
    if(!alloc || (pgtab = (pte_t*)alloc()) == 0)
      return 0;
    // Make sure all those PTE_P bits are zero.
    memset(pgtab, 0, PGSIZE);
    // The permissions here are overly generous, but they can
    // be further restricted by the permissions in the page table
    // entries, if necessary.
    *pde = v2p(pgtab) | PTE_P | PTE_W | PTE_U;
  }
  return &pgtab[PTX(va)];
}

// Create PTEs for virtual addresses starting at va that refer to
// physical addresses starting at pa.  va and size might not
// be page-aligned.
static int
mappages(pde_t *pgdir, void *va, uint size, uint pa,
         int perm, char* (*alloc)(void))
{
  char *a, *last;
  pte_t *pte;

  a = (char*)PGROUNDDOWN((uint)va);
  last = (char*)PGROUNDDOWN(((uint)va) + size - 1);
  for(;;){
    if((pte = walkpgdir(pgdir, a, alloc)) == 0)
      return -1;
    if(*pte & PTE_P)
      panic("remap");
    *pte = pa | perm | PTE_P;
    if(a == last)
      break;
    a += PGSIZE;
    pa += PGSIZE;
  }
  return 0;
}

// There is one page table per process, plus one that's used when
// a CPU is not running any process (kpgdir).  The kernel uses the
// current process's page table during system calls and interrupts;
// page protection bits prevent user code from using the kernel's
// mappings.
//
// setupkvm() and exec() set up every page table like this:
//
//   0..KERNBASE: user memory (text+data+stack+heap), mapped to
//                phys memory allocated by the kernel
//   KERNBASE..KERNBASE+EXTMEM: mapped to 0..EXTMEM (for I/O space)
//   KERNBASE+EXTMEM..data: mapped to EXTMEM..V2P(data)
//                for the kernel's instructions and r/o data
//   data..KERNBASE+PHYSTOP: mapped to V2P(data)..PHYSTOP,
//                rw data + free physical memory
//   0xfe000000..0: mapped direct (devices such as ioapic)
//
// The kernel allocates physical memory for its heap and for user memory
// between V2P(end) and the end of physical memory (PHYSTOP)
// (directly addressable from end..P2V(PHYSTOP)).
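
// For concreteness: with the values xv6 conventionally uses in
// memlayout.h (an assumption; check your tree), KERNBASE = 0x80000000,
// EXTMEM = 0x100000, PHYSTOP = 0xE000000, and DEVSPACE = 0xFE000000,
// the kernel half of every address space starts at 0x80000000:
// 0x80000000..0x80100000 covers the low I/O hole, the kernel image
// is linked at KERNLINK = 0x80100000, and 0xFE000000..4GB is mapped
// one-to-one for memory-mapped devices.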

// This table defines the kernel's mappings, which are present in
// every process's page table.
static struct kmap {
  void *virt;
  uint phys_start;
  uint phys_end;
  int perm;
} kmap[] = {
 { (void*)KERNBASE, 0,             EXTMEM,    PTE_W}, // I/O space
 { (void*)KERNLINK, V2P(KERNLINK), V2P(data), 0},     // kernel text+rodata
 { (void*)data,     V2P(data),     PHYSTOP,   PTE_W}, // kernel data, memory
 // phys_end of 0 wraps in the uint subtraction below, mapping
 // DEVSPACE up to the top of the 4GB address space.
 { (void*)DEVSPACE, DEVSPACE,      0,         PTE_W}, // more devices
};

// Set up kernel part of a page table.
pde_t*
setupkvm(char* (*alloc)(void))
{
  pde_t *pgdir;
  struct kmap *k;

  if((pgdir = (pde_t*)alloc()) == 0)
    return 0;
  memset(pgdir, 0, PGSIZE);
  if (p2v(PHYSTOP) > (void*)DEVSPACE)
    panic("PHYSTOP too high");
  for(k = kmap; k < &kmap[NELEM(kmap)]; k++)
    if(mappages(pgdir, k->virt, k->phys_end - k->phys_start,
                (uint)k->phys_start, k->perm, alloc) < 0)
      return 0;
  return pgdir;
}

// Allocate one page table for the machine for the kernel address
// space for scheduler processes.
void
kvmalloc(void)
{
  kpgdir = setupkvm(enter_alloc);
  switchkvm();
}

// Switch h/w page table register to the kernel-only page table,
// for when no process is running.
void
switchkvm(void)
{
  lcr3(v2p(kpgdir));   // switch to the kernel page table
}

// Switch TSS and h/w page table to correspond to process p.
void
switchuvm(struct proc *p)
{
  pushcli();
  if(p->pgdir == 0)
    panic("switchuvm: no pgdir");
  cpu->gdt[SEG_TSS] = SEG16(STS_T32A, &cpu->ts, sizeof(cpu->ts)-1, 0);
  cpu->gdt[SEG_TSS].s = 0;
  cpu->ts.ss0 = SEG_KDATA << 3;
  cpu->ts.esp0 = (uint)p->kstack + KSTACKSIZE;
  ltr(SEG_TSS << 3);
  lcr3(v2p(p->pgdir));  // switch to new address space
  popcli();
}

// Load the initcode into address 0 of pgdir.
// sz must be less than a page.
void
inituvm(pde_t *pgdir, char *init, uint sz)
{
  char *mem;

  if(sz >= PGSIZE)
    panic("inituvm: more than a page");
  mem = kalloc();
  memset(mem, 0, PGSIZE);
  mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc);
  memmove(mem, init, sz);
}

// Load a program segment into pgdir.  addr must be page-aligned
// and the pages from addr to addr+sz must already be mapped.
int
loaduvm(pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz)
{
  uint i, pa, n;
  pte_t *pte;

  if((uint) addr % PGSIZE != 0)
    panic("loaduvm: addr must be page aligned");
  for(i = 0; i < sz; i += PGSIZE){
    if((pte = walkpgdir(pgdir, addr+i, 0)) == 0)
      panic("loaduvm: address should exist");
    pa = PTE_ADDR(*pte);
    if(sz - i < PGSIZE)
      n = sz - i;
    else
      n = PGSIZE;
    if(readi(ip, p2v(pa), offset+i, n) != n)
      return -1;
  }
  return 0;
}
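
// A sketch of the typical caller, modeled on xv6's exec() (not copied
// verbatim): grow the image with allocuvm (below), then read each ELF
// program segment's file contents onto the fresh pages with loaduvm:
//
//   if((sz = allocuvm(pgdir, sz, ph.vaddr + ph.memsz)) == 0)
//     goto bad;
//   if(loaduvm(pgdir, (char*)ph.vaddr, ip, ph.off, ph.filesz) < 0)
//     goto bad;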

// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned.  Returns new size or 0 on error.
int
allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  char *mem;
  uint a;

  if(newsz >= KERNBASE)
    return 0;
  if(newsz < oldsz)
    return oldsz;

  a = PGROUNDUP(oldsz);
  for(; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      cprintf("allocuvm out of memory\n");
      deallocuvm(pgdir, newsz, oldsz);
      return 0;
    }
    memset(mem, 0, PGSIZE);
    if(mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc) < 0){
      // Undo the partial grow; mem was never mapped, so deallocuvm
      // cannot free it and we must do so directly.
      cprintf("allocuvm out of memory (2)\n");
      deallocuvm(pgdir, newsz, oldsz);
      kfree(mem);
      return 0;
    }
  }
  return newsz;
}

// Deallocate user pages to bring the process size from oldsz to
// newsz.  oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz.  oldsz can be larger than the actual
// process size.  Returns the new process size.
int
deallocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  pte_t *pte;
  uint a, pa;

  if(newsz >= oldsz)
    return oldsz;

  a = PGROUNDUP(newsz);
  for(; a < oldsz; a += PGSIZE){
    pte = walkpgdir(pgdir, (char*)a, 0);
    if(!pte)
      // No page table page here; skip ahead to the next one.
      a += (NPTENTRIES - 1) * PGSIZE;
    else if((*pte & PTE_P) != 0){
      pa = PTE_ADDR(*pte);
      if(pa == 0)
        panic("kfree");
      char *v = p2v(pa);
      kfree(v);
      *pte = 0;
    }
  }
  return newsz;
}

// Free a page table and all the physical memory pages
// in the user part.
void
freevm(pde_t *pgdir)
{
  uint i;

  if(pgdir == 0)
    panic("freevm: no pgdir");
  deallocuvm(pgdir, KERNBASE, 0);
  for(i = 0; i < NPDENTRIES; i++){
    if(pgdir[i] & PTE_P){
      char *v = p2v(PTE_ADDR(pgdir[i]));
      kfree(v);
    }
  }
  kfree((char*)pgdir);
}

// Clear PTE_U on a page.  Used to create an inaccessible
// page beneath the user stack.
void
clearpteu(pde_t *pgdir, char *uva)
{
  pte_t *pte;

  pte = walkpgdir(pgdir, uva, 0);
  if(pte == 0)
    panic("clearpteu");
  *pte &= ~PTE_U;
}
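
// exec() pairs allocuvm and clearpteu roughly like this (a sketch):
// allocate two extra pages at the top of the image and strip PTE_U
// from the lower one, so that running off the end of the user stack
// faults instead of silently corrupting memory:
//
//   sz = allocuvm(pgdir, sz, sz + 2*PGSIZE);
//   clearpteu(pgdir, (char*)(sz - 2*PGSIZE));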

// Given a parent process's page table, create a copy
// of it for a child.
pde_t*
copyuvm(pde_t *pgdir, uint sz)
{
  pde_t *d;
  pte_t *pte;
  uint pa, i;
  char *mem;

  if((d = setupkvm(kalloc)) == 0)
    return 0;
  for(i = 0; i < sz; i += PGSIZE){
    if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0)
      panic("copyuvm: pte should exist");
    if(!(*pte & PTE_P))
      panic("copyuvm: page not present");
    pa = PTE_ADDR(*pte);
    if((mem = kalloc()) == 0)
      goto bad;
    memmove(mem, (char*)p2v(pa), PGSIZE);
    if(mappages(d, (void*)i, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc) < 0){
      // mem was never mapped into d, so freevm won't free it.
      kfree(mem);
      goto bad;
    }
  }
  return d;

bad:
  freevm(d);
  return 0;
}

//PAGEBREAK!
// Map user virtual address to kernel address.
char*
uva2ka(pde_t *pgdir, char *uva)
{
  pte_t *pte;

  pte = walkpgdir(pgdir, uva, 0);
  if(pte == 0)  // no page table page for uva
    return 0;
  if((*pte & PTE_P) == 0)
    return 0;
  if((*pte & PTE_U) == 0)
    return 0;
  return (char*)p2v(PTE_ADDR(*pte));
}

// Copy len bytes from p to user address va in page table pgdir.
// Most useful when pgdir is not the current page table.
// uva2ka ensures this only works for PTE_U pages.
int
copyout(pde_t *pgdir, uint va, void *p, uint len)
{
  char *buf, *pa0;
  uint n, va0;

  buf = (char*)p;
  while(len > 0){
    va0 = (uint)PGROUNDDOWN(va);
    pa0 = uva2ka(pgdir, (char*)va0);
    if(pa0 == 0)
      return -1;
    n = PGSIZE - (va - va0);
    if(n > len)
      n = len;
    memmove(pa0 + (va - va0), buf, n);
    len -= n;
    buf += n;
    va = va0 + PGSIZE;
  }
  return 0;
}
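
// A sketch of the typical caller, again modeled on exec(): push the
// argument-pointer array onto the new image's stack while the old page
// table is still installed, which is exactly the situation copyout is
// built for:
//
//   if(copyout(pgdir, sp, ustack, (3+argc+1)*4) < 0)
//     goto bad;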