/* The kernel call implemented in this file:
 *   m_type:	SYS_VMCTL
 *
 * The parameters for this kernel call are:
 *   	SVMCTL_WHO	which process
 *    	SVMCTL_PARAM	set this setting (VMCTL_*)
 *    	SVMCTL_VALUE	to this value
 */

#include "kernel/system.h"
#include "kernel/vm.h"
#include "kernel/debug.h"
#include <assert.h>
#include <minix/type.h>

/*===========================================================================*
 *				do_vmctl				     *
 *===========================================================================*/
/*
 * Handle the SYS_VMCTL kernel call.  This call is issued by the VM server
 * to control kernel-side VM state: acknowledging page faults, fetching and
 * replying to pending memory requests, managing kernel physical mappings,
 * and (un)inhibiting processes while VM manipulates their address spaces.
 *
 * caller  - the process making the call (expected to be VM)
 * m_ptr   - request message; SVMCTL_WHO selects the subject process,
 *           SVMCTL_PARAM selects the operation, SVMCTL_VALUE its argument.
 *
 * Returns OK, an error code, or an operation-specific value (see cases).
 */
int do_vmctl(struct proc * caller, message * m_ptr)
{
  int proc_nr;
  endpoint_t ep = m_ptr->SVMCTL_WHO;
  struct proc *p, *rp, *target;

  /* SELF refers to the caller (VM) itself. */
  if(ep == SELF) { ep = caller->p_endpoint; }

  /* Resolve the subject endpoint to a process slot; reject stale/bad ones. */
  if(!isokendpt(ep, &proc_nr)) {
	printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
	return EINVAL;
  }

  p = proc_addr(proc_nr);

  switch(m_ptr->SVMCTL_PARAM) {
	case VMCTL_CLEAR_PAGEFAULT:
		/* VM has handled the page fault; let the process run again.
		 * The process must actually be blocked on a page fault.
		 */
		assert(RTS_ISSET(p,RTS_PAGEFAULT));
		RTS_UNSET(p, RTS_PAGEFAULT);
		return OK;
	case VMCTL_MEMREQ_GET:
		/* Send VM the information about the memory request.  */
		if(!(rp = vmrequest))
			return ESRCH;	/* no pending requests */
		assert(RTS_ISSET(rp, RTS_VMREQUEST));

		/* NOTE(review): 'target' is computed here but not used in
		 * this case; okendpt() still validates the target endpoint
		 * (and panics on failure), so the lookup is not pure dead
		 * code — only the assignment is.
		 */
		okendpt(rp->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);

		/* Reply with request fields. */
		switch(rp->p_vmrequest.req_type) {
		case VMPTYPE_CHECK:
			/* Describe the faulting access for VM to check. */
			m_ptr->SVMCTL_MRG_TARGET	=
				rp->p_vmrequest.target;
			m_ptr->SVMCTL_MRG_ADDR		=
				rp->p_vmrequest.params.check.start;
			m_ptr->SVMCTL_MRG_LENGTH	=
				rp->p_vmrequest.params.check.length;
			m_ptr->SVMCTL_MRG_FLAG		=
				rp->p_vmrequest.params.check.writeflag;
			m_ptr->SVMCTL_MRG_REQUESTOR	=
				(void *) rp->p_endpoint;
			break;
		default:
			panic("VMREQUEST wrong type");
		}

		/* Mark the request as in flight until VM replies. */
		rp->p_vmrequest.vmresult = VMSUSPEND;

		/* Remove from request chain. */
		vmrequest = vmrequest->p_vmrequest.nextrequestor;

		/* The return value tells VM which request type it got. */
		return rp->p_vmrequest.req_type;
	case VMCTL_MEMREQ_REPLY:
		/* VM is done with a request previously fetched via
		 * VMCTL_MEMREQ_GET for process 'p'; record the result and
		 * arrange for the suspended operation to resume.
		 */
		assert(RTS_ISSET(p, RTS_VMREQUEST));
		assert(p->p_vmrequest.vmresult == VMSUSPEND);
  		okendpt(p->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);
		p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
		/* VM must reply with a real result, not the sentinel. */
		assert(p->p_vmrequest.vmresult != VMSUSPEND);

		switch(p->p_vmrequest.type) {
		case VMSTYPE_KERNELCALL:
			/*
			 * we will have to resume execution of the kernel call
			 * as soon the scheduler picks up this process again
			 */
			p->p_misc_flags |= MF_KCALL_RESUME;
			break;
		case VMSTYPE_DELIVERMSG:
			/* Suspended message delivery: it retries
			 * automatically once RTS_VMREQUEST is cleared below.
			 */
			assert(p->p_misc_flags & MF_DELIVERMSG);
			assert(p == target);
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		case VMSTYPE_MAP:
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		default:
			panic("strange request type: %d",p->p_vmrequest.type);
		}

		/* Unblock the process; it may now be scheduled again. */
		RTS_UNSET(p, RTS_VMREQUEST);
		return OK;

	case VMCTL_KERN_PHYSMAP:
	{
		/* Enumerate kernel physical mappings; index in SVMCTL_VALUE.
		 * The address/length/flags are returned in the message.
		 */
		int i = m_ptr->SVMCTL_VALUE;
		return arch_phys_map(i,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
			&m_ptr->SVMCTL_MAP_FLAGS);
	}
	case VMCTL_KERN_MAP_REPLY:
	{
		/* VM tells the kernel where mapping SVMCTL_VALUE ended up. */
		return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
			(vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
	}
	case VMCTL_VMINHIBIT_SET:
		/* check if we must stop a process on a different CPU */
#if CONFIG_SMP
		if (p->p_cpu != cpuid) {
			/* Running elsewhere: ask that CPU to stop it. */
			smp_schedule_vminhibit(p);
		} else
#endif
			RTS_SET(p, RTS_VMINHIBIT);
#if CONFIG_SMP
		/* Its mappings may change under it; force a TLB flush
		 * before it runs again.
		 */
		p->p_misc_flags |= MF_FLUSH_TLB;
#endif
		return OK;
	case VMCTL_VMINHIBIT_CLEAR:
		assert(RTS_ISSET(p, RTS_VMINHIBIT));
		/*
		 * the processes is certainly not runnable, no need to tell its
		 * cpu
		 */
		RTS_UNSET(p, RTS_VMINHIBIT);
		/* NOTE(review): this uses '#ifdef CONFIG_SMP' while the
		 * VMINHIBIT_SET case above uses '#if CONFIG_SMP'.  These
		 * differ if CONFIG_SMP is ever defined as 0 — confirm the
		 * build always defines it as 1 or not at all.
		 */
#ifdef CONFIG_SMP
		if (p->p_misc_flags & MF_SENDA_VM_MISS) {
			/* An asynchronous send was missed while the process
			 * was inhibited; retry delivery now.
			 */
			struct priv *privp;
			p->p_misc_flags &= ~MF_SENDA_VM_MISS;
			privp = priv(p);
			try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
							privp->s_asynsize);
		}
		/*
		 * We don't know whether kernel has the changed mapping
		 * installed to access userspace memory. And if so, on what CPU.
		 * More over we don't know what mapping has changed and how and
		 * therefore we must invalidate all mappings we have anywhere.
		 * Next time we map memory, we map it fresh.
		 */
		bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
#endif
		return OK;
	case VMCTL_CLEARMAPCACHE:
		/* VM says: forget about old mappings we have cached. */
		mem_clear_mapcache();
		return OK;
	case VMCTL_BOOTINHIBIT_CLEAR:
		/* Allow a process held back during boot to start running. */
		RTS_UNSET(p, RTS_BOOTINHIBIT);
		return OK;
  }

  /* Try architecture-specific vmctls. */
  return arch_do_vmctl(m_ptr, p);
}
165