/* This task handles the interface between the kernel and user-level servers.
 * System services can be accessed by doing a system call. System calls are
 * transformed into request messages, which are handled by this task. By
 * convention, a sys_call() is transformed in a SYS_CALL request message that
 * is handled in a function named do_call().
 *
 * A private call vector is used to map all system calls to the functions that
 * handle them. The actual handler functions are contained in separate files
 * to keep this file clean. The call vector is used in the system task's main
 * loop to handle all incoming requests.
 *
 * In addition to the main sys_task() entry point, which starts the main loop,
 * there are several other minor entry points:
 *   get_priv:		assign privilege structure to user or system process
 *   set_sendto_bit:	allow a process to send messages to a new target
 *   unset_sendto_bit:	disallow a process from sending messages to a target
 *   fill_sendto_mask:	fill the target mask of a given process
 *   send_sig:		send a signal directly to a system process
 *   cause_sig:		take action to cause a signal to occur via a signal mgr
 *   sig_delay_done:	tell PM that a process is not sending
 *   send_diag_sig:	send a diagnostics signal to interested processes
 *   get_randomness:	accumulate randomness in a buffer
 *   clear_endpoint:	remove a process' ability to send and receive messages
 *   sched_proc:	schedule a process
 *
 * Changes:
 *   Nov 22, 2009   get_priv supports static priv ids (Cristiano Giuffrida)
 *   Aug 04, 2005   check if system call is allowed  (Jorrit N. Herder)
 *   Jul 20, 2005   send signal to services with message  (Jorrit N. Herder)
 *   Jan 15, 2005   new, generalized virtual copy function  (Jorrit N. Herder)
 *   Oct 10, 2004   dispatch system calls from call vector  (Jorrit N. Herder)
 *   Sep 30, 2004   source code documentation updated  (Jorrit N. Herder)
 */

#include "kernel/kernel.h"
#include "kernel/system.h"
#include "kernel/vm.h"
#include "kernel/clock.h"
#include <stdlib.h>
#include <assert.h>
#include <signal.h>
#include <unistd.h>
#include <minix/endpoint.h>
#include <minix/safecopies.h>

/* Declaration of the call vector that defines the mapping of system calls
 * to handler functions. The vector is initialized in sys_init() with map(),
 * which makes sure the system call numbers are ok. No space is allocated,
 * because the dummy is declared extern. If an illegal call is given, the
 * array size will be negative and this won't compile.
 */
static int (*call_vec[NR_SYS_CALLS])(struct proc * caller, message *m_ptr);

/* Register a kernel-call handler in call_vec; the assert catches call
 * numbers outside [KERNEL_CALL, KERNEL_CALL + NR_SYS_CALLS) at setup time.
 */
#define map(call_nr, handler) \
    {	int call_index = call_nr-KERNEL_CALL; \
	assert(call_index >= 0 && call_index < NR_SYS_CALLS); \
	call_vec[call_index] = (handler)  ; }

/* Complete a kernel call: either park the request message for a VM-suspended
 * retry, or deliver the result message back to the caller's user buffer.
 */
static void kernel_call_finish(struct proc * caller, message *msg, int result)
{
  if(result == VMSUSPEND) {
	  /* Special case: message has to be saved for handling
	   * until VM tells us it's allowed. VM has been notified
	   * and we must wait for its reply to restart the call.
	   */
	  assert(RTS_ISSET(caller, RTS_VMREQUEST));
	  assert(caller->p_vmrequest.type == VMSTYPE_KERNELCALL);
	  caller->p_vmrequest.saved.reqmsg = *msg;
	  caller->p_misc_flags |= MF_KCALL_RESUME;
  } else {
	  /*
	   * call is finished, we could have been suspended because of VM,
	   * remove the request message
	   */
	  caller->p_vmrequest.saved.reqmsg.m_source = NONE;
	  if (result != EDONTREPLY) {
		  /* copy the result as a message to the original user buffer */
		  msg->m_source = SYSTEM;
		  msg->m_type = result;		/* report status of call */
#if DEBUG_IPC_HOOK
	hook_ipc_msgkresult(msg, caller);
#endif
		  /* Copy failure means the caller passed a bad pointer;
		   * punish it with SIGSEGV rather than reply.
		   */
		  if (copy_msg_to_user(msg, (message *)caller->p_delivermsg_vir)) {
			  printf("WARNING wrong user pointer 0x%08x from "
					  "process %s / %d\n",
					  caller->p_delivermsg_vir,
					  caller->p_name,
					  caller->p_endpoint);
			  cause_sig(proc_nr(caller), SIGSEGV);
		  }
	  }
  }
}

/* Validate the call number and the caller's kernel-call privilege mask, then
 * invoke the matching handler from call_vec. Returns the handler's result,
 * EBADREQUEST for out-of-range/unmapped calls, or ECALLDENIED when the
 * caller's privilege mask does not allow the call.
 */
static int kernel_call_dispatch(struct proc * caller, message *msg)
{
  int result = OK;
  int call_nr;

#if DEBUG_IPC_HOOK
	hook_ipc_msgkcall(msg, caller);
#endif
  call_nr = msg->m_type - KERNEL_CALL;

  /* See if the caller made a valid request and try to handle it. */
  if (call_nr < 0 || call_nr >= NR_SYS_CALLS) {	/* check call number */
	  printf("SYSTEM: illegal request %d from %d.\n",
			  call_nr,msg->m_source);
	  result = EBADREQUEST;			/* illegal message type */
  }
  else if (!GET_BIT(priv(caller)->s_k_call_mask, call_nr)) {
	  printf("SYSTEM: denied request %d from %d.\n",
			  call_nr,msg->m_source);
	  result = ECALLDENIED;			/* illegal message type */
  } else {
	  /* handle the system call */
	  if (call_vec[call_nr])
		  result = (*call_vec[call_nr])(caller, msg);
	  else {
		  printf("Unused kernel call %d from %d\n",
				  call_nr, caller->p_endpoint);
		  result = EBADREQUEST;
	  }
  }

  return result;
}

/*===========================================================================*
 *				kernel_call				     *
 *===========================================================================*/
/*
 * This function checks the basic syscall parameters and, if accepted,
 * dispatches its handling to the right handler.
 */
void kernel_call(message *m_user, struct proc * caller)
{
  int result = OK;
  message msg;

  caller->p_delivermsg_vir = (vir_bytes) m_user;
  /*
   * The ldt and cr3 of the caller process are loaded because it has just
   * trapped into the kernel, or they were already set in switch_to_user()
   * before we resume execution of an interrupted kernel call.
   */
  if (copy_msg_from_user(m_user, &msg) == 0) {
	  msg.m_source = caller->p_endpoint;
	  result = kernel_call_dispatch(caller, &msg);
  }
  else {
	  /* Unreadable request pointer: signal the caller and bail out. */
	  printf("WARNING wrong user pointer 0x%08x from process %s / %d\n",
			  m_user, caller->p_name, caller->p_endpoint);
	  cause_sig(proc_nr(caller), SIGSEGV);
	  return;
  }


  /* remember who invoked the kcall so we can bill it its time */
  kbill_kcall = caller;

  kernel_call_finish(caller, &msg, result);
}

/*===========================================================================*
 *				initialize				     *
*===========================================================================*/ 168 void system_init(void) 169 { 170 register struct priv *sp; 171 int i; 172 173 /* Initialize IRQ handler hooks. Mark all hooks available. */ 174 for (i=0; i<NR_IRQ_HOOKS; i++) { 175 irq_hooks[i].proc_nr_e = NONE; 176 } 177 178 /* Initialize all alarm timers for all processes. */ 179 for (sp=BEG_PRIV_ADDR; sp < END_PRIV_ADDR; sp++) { 180 tmr_inittimer(&(sp->s_alarm_timer)); 181 } 182 183 /* Initialize the call vector to a safe default handler. Some system calls 184 * may be disabled or nonexistant. Then explicitly map known calls to their 185 * handler functions. This is done with a macro that gives a compile error 186 * if an illegal call number is used. The ordering is not important here. 187 */ 188 for (i=0; i<NR_SYS_CALLS; i++) { 189 call_vec[i] = NULL; 190 } 191 192 /* Process management. */ 193 map(SYS_FORK, do_fork); /* a process forked a new process */ 194 map(SYS_EXEC, do_exec); /* update process after execute */ 195 map(SYS_CLEAR, do_clear); /* clean up after process exit */ 196 map(SYS_EXIT, do_exit); /* a system process wants to exit */ 197 map(SYS_PRIVCTL, do_privctl); /* system privileges control */ 198 map(SYS_TRACE, do_trace); /* request a trace operation */ 199 map(SYS_SETGRANT, do_setgrant); /* get/set own parameters */ 200 map(SYS_RUNCTL, do_runctl); /* set/clear stop flag of a process */ 201 map(SYS_UPDATE, do_update); /* update a process into another */ 202 map(SYS_STATECTL, do_statectl); /* let a process control its state */ 203 204 /* Signal handling. */ 205 map(SYS_KILL, do_kill); /* cause a process to be signaled */ 206 map(SYS_GETKSIG, do_getksig); /* signal manager checks for signals */ 207 map(SYS_ENDKSIG, do_endksig); /* signal manager finished signal */ 208 map(SYS_SIGSEND, do_sigsend); /* start POSIX-style signal */ 209 map(SYS_SIGRETURN, do_sigreturn); /* return from POSIX-style signal */ 210 211 /* Device I/O. 
*/ 212 map(SYS_IRQCTL, do_irqctl); /* interrupt control operations */ 213 #if defined(__i386__) 214 map(SYS_DEVIO, do_devio); /* inb, inw, inl, outb, outw, outl */ 215 map(SYS_VDEVIO, do_vdevio); /* vector with devio requests */ 216 #endif 217 218 /* Memory management. */ 219 map(SYS_MEMSET, do_memset); /* write char to memory area */ 220 map(SYS_VMCTL, do_vmctl); /* various VM process settings */ 221 222 /* Copying. */ 223 map(SYS_UMAP, do_umap); /* map virtual to physical address */ 224 map(SYS_UMAP_REMOTE, do_umap_remote); /* do_umap for non-caller process */ 225 map(SYS_VUMAP, do_vumap); /* vectored virtual to physical map */ 226 map(SYS_VIRCOPY, do_vircopy); /* use pure virtual addressing */ 227 map(SYS_PHYSCOPY, do_copy); /* use physical addressing */ 228 map(SYS_SAFECOPYFROM, do_safecopy_from);/* copy with pre-granted permission */ 229 map(SYS_SAFECOPYTO, do_safecopy_to); /* copy with pre-granted permission */ 230 map(SYS_VSAFECOPY, do_vsafecopy); /* vectored safecopy */ 231 232 /* safe memset */ 233 map(SYS_SAFEMEMSET, do_safememset); /* safememset */ 234 235 /* Clock functionality. */ 236 map(SYS_TIMES, do_times); /* get uptime and process times */ 237 map(SYS_SETALARM, do_setalarm); /* schedule a synchronous alarm */ 238 map(SYS_STIME, do_stime); /* set the boottime */ 239 map(SYS_SETTIME, do_settime); /* set the system time (realtime) */ 240 map(SYS_VTIMER, do_vtimer); /* set or retrieve a virtual timer */ 241 242 /* System control. */ 243 map(SYS_ABORT, do_abort); /* abort MINIX */ 244 map(SYS_GETINFO, do_getinfo); /* request system information */ 245 map(SYS_DIAGCTL, do_diagctl); /* diagnostics-related functionality */ 246 247 /* Profiling. */ 248 map(SYS_SPROF, do_sprofile); /* start/stop statistical profiling */ 249 map(SYS_CPROF, do_cprofile); /* get/reset call profiling data */ 250 map(SYS_PROFBUF, do_profbuf); /* announce locations to kernel */ 251 252 /* arm-specific. 
*/ 253 #if defined(__arm__) 254 map(SYS_PADCONF, do_padconf); /* configure pinmux */ 255 #endif 256 257 /* i386-specific. */ 258 #if defined(__i386__) 259 map(SYS_READBIOS, do_readbios); /* read from BIOS locations */ 260 map(SYS_IOPENABLE, do_iopenable); /* Enable I/O */ 261 map(SYS_SDEVIO, do_sdevio); /* phys_insb, _insw, _outsb, _outsw */ 262 #endif 263 264 /* Machine state switching. */ 265 map(SYS_SETMCONTEXT, do_setmcontext); /* set machine context */ 266 map(SYS_GETMCONTEXT, do_getmcontext); /* get machine context */ 267 268 /* Scheduling */ 269 map(SYS_SCHEDULE, do_schedule); /* reschedule a process */ 270 map(SYS_SCHEDCTL, do_schedctl); /* change process scheduler */ 271 272 } 273 /*===========================================================================* 274 * get_priv * 275 *===========================================================================*/ 276 int get_priv(rc, priv_id) 277 register struct proc *rc; /* new (child) process pointer */ 278 int priv_id; /* privilege id */ 279 { 280 /* Allocate a new privilege structure for a system process. Privilege ids 281 * can be assigned either statically or dynamically. 
282 */ 283 register struct priv *sp; /* privilege structure */ 284 285 if(priv_id == NULL_PRIV_ID) { /* allocate slot dynamically */ 286 for (sp = BEG_DYN_PRIV_ADDR; sp < END_DYN_PRIV_ADDR; ++sp) 287 if (sp->s_proc_nr == NONE) break; 288 if (sp >= END_DYN_PRIV_ADDR) return(ENOSPC); 289 } 290 else { /* allocate slot from id */ 291 if(!is_static_priv_id(priv_id)) { 292 return EINVAL; /* invalid static priv id */ 293 } 294 if(priv[priv_id].s_proc_nr != NONE) { 295 return EBUSY; /* slot already in use */ 296 } 297 sp = &priv[priv_id]; 298 } 299 rc->p_priv = sp; /* assign new slot */ 300 rc->p_priv->s_proc_nr = proc_nr(rc); /* set association */ 301 302 return(OK); 303 } 304 305 /*===========================================================================* 306 * set_sendto_bit * 307 *===========================================================================*/ 308 void set_sendto_bit(const struct proc *rp, int id) 309 { 310 /* Allow a process to send messages to the process(es) associated with the 311 * system privilege structure with the given ID. 312 */ 313 314 /* Disallow the process from sending to a process privilege structure with no 315 * associated process, and disallow the process from sending to itself. 316 */ 317 if (id_to_nr(id) == NONE || priv_id(rp) == id) { 318 unset_sys_bit(priv(rp)->s_ipc_to, id); 319 return; 320 } 321 322 set_sys_bit(priv(rp)->s_ipc_to, id); 323 324 /* The process that this process can now send to, must be able to reply (or 325 * vice versa). Therefore, its send mask should be updated as well. Ignore 326 * receivers that don't support traps other than RECEIVE, they can't reply 327 * or send messages anyway. 
328 */ 329 if (priv_addr(id)->s_trap_mask & ~((1 << RECEIVE))) 330 set_sys_bit(priv_addr(id)->s_ipc_to, priv_id(rp)); 331 } 332 333 /*===========================================================================* 334 * unset_sendto_bit * 335 *===========================================================================*/ 336 void unset_sendto_bit(const struct proc *rp, int id) 337 { 338 /* Prevent a process from sending to another process. Retain the send mask 339 * symmetry by also unsetting the bit for the other direction. 340 */ 341 342 unset_sys_bit(priv(rp)->s_ipc_to, id); 343 344 unset_sys_bit(priv_addr(id)->s_ipc_to, priv_id(rp)); 345 } 346 347 /*===========================================================================* 348 * fill_sendto_mask * 349 *===========================================================================*/ 350 void fill_sendto_mask(const struct proc *rp, sys_map_t *map) 351 { 352 int i; 353 354 for (i=0; i < NR_SYS_PROCS; i++) { 355 if (get_sys_bit(*map, i)) 356 set_sendto_bit(rp, i); 357 else 358 unset_sendto_bit(rp, i); 359 } 360 } 361 362 /*===========================================================================* 363 * send_sig * 364 *===========================================================================*/ 365 int send_sig(endpoint_t ep, int sig_nr) 366 { 367 /* Notify a system process about a signal. This is straightforward. Simply 368 * set the signal that is to be delivered in the pending signals map and 369 * send a notification with source SYSTEM. 
370 */ 371 register struct proc *rp; 372 struct priv *priv; 373 int proc_nr; 374 375 if(!isokendpt(ep, &proc_nr) || isemptyn(proc_nr)) 376 return EINVAL; 377 378 rp = proc_addr(proc_nr); 379 priv = priv(rp); 380 if(!priv) return ENOENT; 381 sigaddset(&priv->s_sig_pending, sig_nr); 382 increase_proc_signals(rp); 383 mini_notify(proc_addr(SYSTEM), rp->p_endpoint); 384 385 return OK; 386 } 387 388 /*===========================================================================* 389 * cause_sig * 390 *===========================================================================*/ 391 void cause_sig(proc_nr, sig_nr) 392 proc_nr_t proc_nr; /* process to be signalled */ 393 int sig_nr; /* signal to be sent */ 394 { 395 /* A system process wants to send a signal to a process. Examples are: 396 * - HARDWARE wanting to cause a SIGSEGV after a CPU exception 397 * - TTY wanting to cause SIGINT upon getting a DEL 398 * - FS wanting to cause SIGPIPE for a broken pipe 399 * Signals are handled by sending a message to the signal manager assigned to 400 * the process. This function handles the signals and makes sure the signal 401 * manager gets them by sending a notification. The process being signaled 402 * is blocked while the signal manager has not finished all signals for it. 403 * Race conditions between calls to this function and the system calls that 404 * process pending kernel signals cannot exist. Signal related functions are 405 * only called when a user process causes a CPU exception and from the kernel 406 * process level, which runs to completion. 407 */ 408 register struct proc *rp, *sig_mgr_rp; 409 endpoint_t sig_mgr; 410 int sig_mgr_proc_nr; 411 int s; 412 413 /* Lookup signal manager. */ 414 rp = proc_addr(proc_nr); 415 sig_mgr = priv(rp)->s_sig_mgr; 416 if(sig_mgr == SELF) sig_mgr = rp->p_endpoint; 417 418 /* If the target is the signal manager of itself, send the signal directly. 
*/ 419 if(rp->p_endpoint == sig_mgr) { 420 if(SIGS_IS_LETHAL(sig_nr)) { 421 /* If the signal is lethal, see if a backup signal manager exists. */ 422 sig_mgr = priv(rp)->s_bak_sig_mgr; 423 if(sig_mgr != NONE && isokendpt(sig_mgr, &sig_mgr_proc_nr)) { 424 priv(rp)->s_sig_mgr = sig_mgr; 425 priv(rp)->s_bak_sig_mgr = NONE; 426 sig_mgr_rp = proc_addr(sig_mgr_proc_nr); 427 RTS_UNSET(sig_mgr_rp, RTS_NO_PRIV); 428 cause_sig(proc_nr, sig_nr); /* try again with the new sig mgr. */ 429 return; 430 } 431 /* We are out of luck. Time to panic. */ 432 proc_stacktrace(rp); 433 panic("cause_sig: sig manager %d gets lethal signal %d for itself", 434 rp->p_endpoint, sig_nr); 435 } 436 sigaddset(&priv(rp)->s_sig_pending, sig_nr); 437 if(OK != send_sig(rp->p_endpoint, SIGKSIGSM)) 438 panic("send_sig failed"); 439 return; 440 } 441 442 if((s = sigismember(&rp->p_pending, sig_nr)) < 0) 443 panic("sigismember failed"); 444 /* Check if the signal is already pending. Process it otherwise. */ 445 if (!s) { 446 sigaddset(&rp->p_pending, sig_nr); 447 increase_proc_signals(rp); 448 if (! (RTS_ISSET(rp, RTS_SIGNALED))) { /* other pending */ 449 RTS_SET(rp, RTS_SIGNALED | RTS_SIG_PENDING); 450 if(OK != send_sig(sig_mgr, SIGKSIG)) 451 panic("send_sig failed"); 452 } 453 } 454 } 455 456 /*===========================================================================* 457 * sig_delay_done * 458 *===========================================================================*/ 459 void sig_delay_done(struct proc *rp) 460 { 461 /* A process is now known not to send any direct messages. 462 * Tell PM that the stop delay has ended, by sending a signal to the process. 463 * Used for actual signal delivery. 
464 */ 465 466 rp->p_misc_flags &= ~MF_SIG_DELAY; 467 468 cause_sig(proc_nr(rp), SIGSNDELAY); 469 } 470 471 /*===========================================================================* 472 * send_diag_sig * 473 *===========================================================================*/ 474 void send_diag_sig(void) 475 { 476 /* Send a SIGKMESS signal to all processes in receiving updates about new 477 * diagnostics messages. 478 */ 479 struct priv *privp; 480 endpoint_t ep; 481 482 for (privp = BEG_PRIV_ADDR; privp < END_PRIV_ADDR; privp++) { 483 if (privp->s_proc_nr != NONE && privp->s_diag_sig == TRUE) { 484 ep = proc_addr(privp->s_proc_nr)->p_endpoint; 485 send_sig(ep, SIGKMESS); 486 } 487 } 488 } 489 490 /*===========================================================================* 491 * clear_ipc * 492 *===========================================================================*/ 493 static void clear_ipc( 494 register struct proc *rc /* slot of process to clean up */ 495 ) 496 { 497 /* Clear IPC data for a given process slot. 
*/ 498 struct proc **xpp; /* iterate over caller queue */ 499 500 if (RTS_ISSET(rc, RTS_SENDING)) { 501 int target_proc; 502 503 okendpt(rc->p_sendto_e, &target_proc); 504 xpp = &proc_addr(target_proc)->p_caller_q; /* destination's queue */ 505 while (*xpp) { /* check entire queue */ 506 if (*xpp == rc) { /* process is on the queue */ 507 *xpp = (*xpp)->p_q_link; /* replace by next process */ 508 #if DEBUG_ENABLE_IPC_WARNINGS 509 printf("endpoint %d / %s removed from queue at %d\n", 510 rc->p_endpoint, rc->p_name, rc->p_sendto_e); 511 #endif 512 break; /* can only be queued once */ 513 } 514 xpp = &(*xpp)->p_q_link; /* proceed to next queued */ 515 } 516 RTS_UNSET(rc, RTS_SENDING); 517 } 518 RTS_UNSET(rc, RTS_RECEIVING); 519 } 520 521 /*===========================================================================* 522 * clear_endpoint * 523 *===========================================================================*/ 524 void clear_endpoint(rc) 525 register struct proc *rc; /* slot of process to clean up */ 526 { 527 if(isemptyp(rc)) panic("clear_proc: empty process: %d", rc->p_endpoint); 528 529 530 #if DEBUG_IPC_HOOK 531 hook_ipc_clear(rc); 532 #endif 533 534 /* Make sure that the exiting process is no longer scheduled. */ 535 RTS_SET(rc, RTS_NO_ENDPOINT); 536 if (priv(rc)->s_flags & SYS_PROC) 537 { 538 priv(rc)->s_asynsize= 0; 539 } 540 541 /* If the process happens to be queued trying to send a 542 * message, then it must be removed from the message queues. 543 */ 544 clear_ipc(rc); 545 546 /* Likewise, if another process was sending or receive a message to or from 547 * the exiting process, it must be alerted that process no longer is alive. 548 * Check all processes. 
549 */ 550 clear_ipc_refs(rc, EDEADSRCDST); 551 552 } 553 554 /*===========================================================================* 555 * clear_ipc_refs * 556 *===========================================================================*/ 557 void clear_ipc_refs(rc, caller_ret) 558 register struct proc *rc; /* slot of process to clean up */ 559 int caller_ret; /* code to return on callers */ 560 { 561 /* Clear IPC references for a given process slot. */ 562 struct proc *rp; /* iterate over process table */ 563 int src_id; 564 565 /* Tell processes that sent asynchronous messages to 'rc' they are not 566 * going to be delivered */ 567 while ((src_id = has_pending_asend(rc, ANY)) != NULL_PRIV_ID) 568 cancel_async(proc_addr(id_to_nr(src_id)), rc); 569 570 for (rp = BEG_PROC_ADDR; rp < END_PROC_ADDR; rp++) { 571 if(isemptyp(rp)) 572 continue; 573 574 /* Unset pending notification bits. */ 575 unset_sys_bit(priv(rp)->s_notify_pending, priv(rc)->s_id); 576 577 /* Unset pending asynchronous messages */ 578 unset_sys_bit(priv(rp)->s_asyn_pending, priv(rc)->s_id); 579 580 /* Check if process depends on given process. 
*/ 581 if (P_BLOCKEDON(rp) == rc->p_endpoint) { 582 rp->p_reg.retreg = caller_ret; /* return requested code */ 583 clear_ipc(rp); 584 } 585 } 586 } 587 588 /*===========================================================================* 589 * kernel_call_resume * 590 *===========================================================================*/ 591 void kernel_call_resume(struct proc *caller) 592 { 593 int result; 594 595 assert(!RTS_ISSET(caller, RTS_SLOT_FREE)); 596 assert(!RTS_ISSET(caller, RTS_VMREQUEST)); 597 598 assert(caller->p_vmrequest.saved.reqmsg.m_source == caller->p_endpoint); 599 600 /* 601 printf("KERNEL_CALL restart from %s / %d rts 0x%08x misc 0x%08x\n", 602 caller->p_name, caller->p_endpoint, 603 caller->p_rts_flags, caller->p_misc_flags); 604 */ 605 606 /* re-execute the kernel call, with MF_KCALL_RESUME still set so 607 * the call knows this is a retry. 608 */ 609 result = kernel_call_dispatch(caller, &caller->p_vmrequest.saved.reqmsg); 610 /* 611 * we are resuming the kernel call so we have to remove this flag so it 612 * can be set again 613 */ 614 caller->p_misc_flags &= ~MF_KCALL_RESUME; 615 kernel_call_finish(caller, &caller->p_vmrequest.saved.reqmsg, result); 616 } 617 618 /*===========================================================================* 619 * sched_proc * 620 *===========================================================================*/ 621 int sched_proc(struct proc *p, 622 int priority, 623 int quantum, 624 int cpu) 625 { 626 /* Make sure the values given are within the allowed range.*/ 627 if ((priority < TASK_Q && priority != -1) || priority > NR_SCHED_QUEUES) 628 return(EINVAL); 629 630 if (quantum < 1 && quantum != -1) 631 return(EINVAL); 632 633 #ifdef CONFIG_SMP 634 if ((cpu < 0 && cpu != -1) || (cpu > 0 && (unsigned) cpu >= ncpus)) 635 return(EINVAL); 636 if (cpu != -1 && !(cpu_is_ready(cpu))) 637 return EBADCPU; 638 #endif 639 640 /* In some cases, we might be rescheduling a runnable process. 
In such 641 * a case (i.e. if we are updating the priority) we set the NO_QUANTUM 642 * flag before the generic unset to dequeue/enqueue the process 643 */ 644 645 /* FIXME this preempts the process, do we really want to do that ?*/ 646 647 /* FIXME this is a problem for SMP if the processes currently runs on a 648 * different CPU */ 649 if (proc_is_runnable(p)) { 650 #ifdef CONFIG_SMP 651 if (p->p_cpu != cpuid && cpu != -1 && cpu != p->p_cpu) { 652 smp_schedule_migrate_proc(p, cpu); 653 } 654 #endif 655 656 RTS_SET(p, RTS_NO_QUANTUM); 657 } 658 659 if (proc_is_runnable(p)) 660 RTS_SET(p, RTS_NO_QUANTUM); 661 662 if (priority != -1) 663 p->p_priority = priority; 664 if (quantum != -1) { 665 p->p_quantum_size_ms = quantum; 666 p->p_cpu_time_left = ms_2_cpu_time(quantum); 667 } 668 #ifdef CONFIG_SMP 669 if (cpu != -1) 670 p->p_cpu = cpu; 671 #endif 672 673 /* Clear the scheduling bit and enqueue the process */ 674 RTS_UNSET(p, RTS_NO_QUANTUM); 675 676 return OK; 677 } 678 679