xref: /minix/minix/kernel/system/do_vmctl.c (revision 0a6a1f1d)
/* The kernel call implemented in this file:
 *   m_type:	SYS_VMCTL
 *
 * The parameters for this kernel call are:
 *	SVMCTL_WHO	which process
 *	SVMCTL_PARAM	set this setting (VMCTL_*)
 *	SVMCTL_VALUE	to this value
 */
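
/* Illustrative only: a hypothetical caller-side sketch, not part of this
 * file. A privileged caller such as VM fills in the message fields named
 * above and traps to the kernel, roughly as follows (the libsys wrapper
 * sys_vmctl() normally hides this; the raw form is shown only to make the
 * parameter block above concrete):
 *
 *	message m;
 *	memset(&m, 0, sizeof(m));
 *	m.SVMCTL_WHO = target_ep;			which process
 *	m.SVMCTL_PARAM = VMCTL_CLEAR_PAGEFAULT;		set this setting
 *	m.SVMCTL_VALUE = 0;				to this value
 *	r = _kernel_call(SYS_VMCTL, &m);
 */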

#include "kernel/system.h"
#include "kernel/vm.h"
#include "kernel/debug.h"
#include <assert.h>
#include <minix/type.h>

/*===========================================================================*
 *				do_vmctl				     *
 *===========================================================================*/
int do_vmctl(struct proc * caller, message * m_ptr)
{
  int proc_nr;
  endpoint_t ep = m_ptr->SVMCTL_WHO;
  struct proc *p, *rp, **rpp, *target;

  if(ep == SELF) { ep = caller->p_endpoint; }

  if(!isokendpt(ep, &proc_nr)) {
	printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
	return EINVAL;
  }

  p = proc_addr(proc_nr);

  switch(m_ptr->SVMCTL_PARAM) {
	case VMCTL_CLEAR_PAGEFAULT:
		assert(RTS_ISSET(p, RTS_PAGEFAULT));
		RTS_UNSET(p, RTS_PAGEFAULT);
		return OK;
	case VMCTL_MEMREQ_GET:
		/* Send VM the information about the memory request. We
		 * cannot simply send the first request on the list, because
		 * IPC filters may forbid VM from getting requests from
		 * particular sources; instead, we return the first request
		 * that the filters let through. IPC filters are used only in
		 * rare cases, so the scan below is cheap in practice. (An
		 * illustrative VM-side sketch follows this case below.)
		 */
		for (rpp = &vmrequest; *rpp != NULL;
		    rpp = &(*rpp)->p_vmrequest.nextrequestor) {
			rp = *rpp;

			assert(RTS_ISSET(rp, RTS_VMREQUEST));

			okendpt(rp->p_vmrequest.target, &proc_nr);
			target = proc_addr(proc_nr);

			/* Check against IPC filters. */
			if (!allow_ipc_filtered_memreq(rp, target))
				continue;

			/* Reply with request fields. */
			if (rp->p_vmrequest.req_type != VMPTYPE_CHECK)
				panic("VMREQUEST wrong type");

			m_ptr->SVMCTL_MRG_TARGET	=
				rp->p_vmrequest.target;
			m_ptr->SVMCTL_MRG_ADDR		=
				rp->p_vmrequest.params.check.start;
			m_ptr->SVMCTL_MRG_LENGTH	=
				rp->p_vmrequest.params.check.length;
			m_ptr->SVMCTL_MRG_FLAG		=
				rp->p_vmrequest.params.check.writeflag;
			m_ptr->SVMCTL_MRG_REQUESTOR	=
				(void *) rp->p_endpoint;

			rp->p_vmrequest.vmresult = VMSUSPEND;

			/* Remove from request chain. */
			*rpp = rp->p_vmrequest.nextrequestor;

			return rp->p_vmrequest.req_type;
		}

		return ENOENT;

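	/* Illustrative only: a hypothetical VM-side sketch of the protocol
	 * above, assuming VM drives it with raw kernel calls rather than its
	 * usual wrappers. VM drains the queue by repeating VMCTL_MEMREQ_GET
	 * until ENOENT, servicing each VMPTYPE_CHECK request, then
	 * acknowledging the requestor with VMCTL_MEMREQ_REPLY:
	 *
	 *	message m;
	 *	memset(&m, 0, sizeof(m));
	 *	for (;;) {
	 *		m.SVMCTL_WHO = SELF;
	 *		m.SVMCTL_PARAM = VMCTL_MEMREQ_GET;
	 *		if (_kernel_call(SYS_VMCTL, &m) == ENOENT)
	 *			break;		   queue is empty
	 *		... service the check request for
	 *		    m.SVMCTL_MRG_TARGET at m.SVMCTL_MRG_ADDR ...
	 *		m.SVMCTL_WHO = (endpoint_t) m.SVMCTL_MRG_REQUESTOR;
	 *		m.SVMCTL_PARAM = VMCTL_MEMREQ_REPLY;
	 *		m.SVMCTL_VALUE = OK;	   or an error code
	 *		_kernel_call(SYS_VMCTL, &m);
	 *	}
	 */
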
	case VMCTL_MEMREQ_REPLY:
		assert(RTS_ISSET(p, RTS_VMREQUEST));
		assert(p->p_vmrequest.vmresult == VMSUSPEND);
		okendpt(p->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);
		p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
		assert(p->p_vmrequest.vmresult != VMSUSPEND);

		switch(p->p_vmrequest.type) {
		case VMSTYPE_KERNELCALL:
			/*
			 * We will have to resume execution of the kernel
			 * call as soon as the scheduler picks up this
			 * process again.
			 */
			p->p_misc_flags |= MF_KCALL_RESUME;
			break;
		case VMSTYPE_DELIVERMSG:
			assert(p->p_misc_flags & MF_DELIVERMSG);
			assert(p == target);
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		case VMSTYPE_MAP:
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		default:
			panic("strange request type: %d", p->p_vmrequest.type);
		}

		RTS_UNSET(p, RTS_VMREQUEST);
		return OK;

	case VMCTL_KERN_PHYSMAP:
	{
		int i = m_ptr->SVMCTL_VALUE;
		return arch_phys_map(i,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
			&m_ptr->SVMCTL_MAP_FLAGS);
	}
	case VMCTL_KERN_MAP_REPLY:
	{
		return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
			(vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
	}
	case VMCTL_VMINHIBIT_SET:
		/* Check if we must stop a process on a different CPU.
		 * (See the usage sketch after this case.)
		 */
#if CONFIG_SMP
		if (p->p_cpu != cpuid) {
			smp_schedule_vminhibit(p);
		} else
#endif
			RTS_SET(p, RTS_VMINHIBIT);
#if CONFIG_SMP
		p->p_misc_flags |= MF_FLUSH_TLB;
#endif
		return OK;
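	/* Illustrative only: a hypothetical VM-side usage sketch. The
	 * SET/CLEAR pair brackets an address-space update, assuming VM uses
	 * the libsys wrapper sys_vmctl():
	 *
	 *	sys_vmctl(ep, VMCTL_VMINHIBIT_SET, 0);	  stop the process
	 *	   ... update the process's page tables ...
	 *	sys_vmctl(ep, VMCTL_VMINHIBIT_CLEAR, 0);  let it run again
	 */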
	case VMCTL_VMINHIBIT_CLEAR:
		assert(RTS_ISSET(p, RTS_VMINHIBIT));
		/*
		 * The process is certainly not runnable at this point, so
		 * there is no need to tell its CPU.
		 */
		RTS_UNSET(p, RTS_VMINHIBIT);
#ifdef CONFIG_SMP
		if (p->p_misc_flags & MF_SENDA_VM_MISS) {
			struct priv *privp;
			p->p_misc_flags &= ~MF_SENDA_VM_MISS;
			privp = priv(p);
			try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
							privp->s_asynsize);
		}
		/*
		 * We don't know whether the kernel still has the changed
		 * mapping installed for accessing userspace memory, nor, if
		 * so, on which CPU. Moreover, we don't know which mapping
		 * has changed and how, so we must invalidate all mappings we
		 * have anywhere. The next time we map memory, we map it
		 * fresh.
		 */
		bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
#endif
		return OK;
	case VMCTL_CLEARMAPCACHE:
		/* VM says: forget about old mappings we have cached. */
		mem_clear_mapcache();
		return OK;
	case VMCTL_BOOTINHIBIT_CLEAR:
		RTS_UNSET(p, RTS_BOOTINHIBIT);
		return OK;
  }

  /* Try architecture-specific vmctls. */
  return arch_do_vmctl(m_ptr, p);
}