/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <linux/kthread.h>

#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>

static eventhandler_tag linuxkpi_thread_dtor_tag;

static MALLOC_DEFINE(M_LINUX_CURRENT, "linuxcurrent", "LinuxKPI task structure");

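/*
 * Allocate a LinuxKPI task_struct for the given FreeBSD thread and
 * attach it via td->td_lkpi_task.  An mm_struct is shared with another
 * thread of the same process when possible; otherwise a freshly
 * allocated one is used.  Returns 0 on success or ENOMEM when "flags"
 * does not allow the allocation to sleep.  This function is installed
 * as the lkpi_alloc_current hook by linux_current_init() below.
 */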
int
linux_alloc_current(struct thread *td, int flags)
{
	struct proc *proc;
	struct thread *td_other;
	struct task_struct *ts;
	struct task_struct *ts_other;
	struct mm_struct *mm;
	struct mm_struct *mm_other;

	MPASS(td->td_lkpi_task == NULL);

	ts = malloc(sizeof(*ts), M_LINUX_CURRENT, flags | M_ZERO);
	if (ts == NULL)
		return (ENOMEM);

	mm = malloc(sizeof(*mm), M_LINUX_CURRENT, flags | M_ZERO);
	if (mm == NULL) {
		free(ts, M_LINUX_CURRENT);
		return (ENOMEM);
	}

	/* setup new task structure */
	atomic_set(&ts->kthread_flags, 0);
	ts->task_thread = td;
	ts->comm = td->td_name;
	ts->pid = td->td_tid;
	ts->group_leader = ts;
	atomic_set(&ts->usage, 1);
	atomic_set(&ts->state, TASK_RUNNING);
	init_completion(&ts->parked);
	init_completion(&ts->exited);

	proc = td->td_proc;

	/* check if another thread already has a mm_struct */
	PROC_LOCK(proc);
	FOREACH_THREAD_IN_PROC(proc, td_other) {
		ts_other = td_other->td_lkpi_task;
		if (ts_other == NULL)
			continue;

		mm_other = ts_other->mm;
		if (mm_other == NULL)
			continue;

		/* try to share other mm_struct */
		if (atomic_inc_not_zero(&mm_other->mm_users)) {
			/* set mm_struct pointer */
			ts->mm = mm_other;
			break;
		}
	}
	/* use the newly allocated mm_struct as a fallback */
	if (ts->mm == NULL) {
		/* setup new mm_struct */
		init_rwsem(&mm->mmap_sem);
		atomic_set(&mm->mm_count, 1);
		atomic_set(&mm->mm_users, 1);
		/* set mm_struct pointer */
		ts->mm = mm;
		/* clear the pointer so the mm_struct is not freed below */
		mm = NULL;
	}

	/* store pointer to task struct */
	td->td_lkpi_task = ts;
	PROC_UNLOCK(proc);

	/* free the unused mm_struct, if any */
	free(mm, M_LINUX_CURRENT);

	return (0);
}

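/*
 * Return the task's mm_struct with an additional user reference taken,
 * or NULL if the task has no mm_struct.  The caller is expected to
 * drop the reference with mmput().
 */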
struct mm_struct *
linux_get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = task->mm;
	if (mm != NULL) {
		atomic_inc(&mm->mm_users);
		return (mm);
	}
	return (NULL);
}

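/*
 * Free the memory backing an mm_struct; expected to be called only
 * after the last reference to it has been dropped.
 */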
void
linux_mm_dtor(struct mm_struct *mm)
{
	free(mm, M_LINUX_CURRENT);
}

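/*
 * Release a task_struct: drop its mm_struct user reference via
 * mmput() and free the structure itself.
 */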
void
linux_free_current(struct task_struct *ts)
{
	mmput(ts->mm);
	free(ts, M_LINUX_CURRENT);
}

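/*
 * Thread destructor event handler: detach and release the LinuxKPI
 * task_struct, if any, when a FreeBSD thread is destroyed.
 */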
static void
linuxkpi_thread_dtor(void *arg __unused, struct thread *td)
{
	struct task_struct *ts;

	ts = td->td_lkpi_task;
	if (ts == NULL)
		return;

	td->td_lkpi_task = NULL;
	put_task_struct(ts);
}

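/*
 * Look up the task_struct belonging to "pid".  Values above PID_MAX
 * are treated as thread IDs, smaller values as process IDs, in which
 * case the first thread in the process that has a task_struct is
 * used.  When "do_get" is true a reference is taken on the result.
 * Returns NULL if no matching task_struct is found.
 */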
static struct task_struct *
linux_get_pid_task_int(pid_t pid, const bool do_get)
{
	struct thread *td;
	struct proc *p;
	struct task_struct *ts;

	if (pid > PID_MAX) {
		/* try to find corresponding thread */
		td = tdfind(pid, -1);
		if (td != NULL) {
			ts = td->td_lkpi_task;
			if (do_get && ts != NULL)
				get_task_struct(ts);
			PROC_UNLOCK(td->td_proc);
			return (ts);
		}
	} else {
		/* try to find the corresponding process */
		p = pfind(pid);
		if (p != NULL) {
			FOREACH_THREAD_IN_PROC(p, td) {
				ts = td->td_lkpi_task;
				if (ts != NULL) {
					if (do_get)
						get_task_struct(ts);
					PROC_UNLOCK(p);
					return (ts);
				}
			}
			PROC_UNLOCK(p);
		}
	}
	return (NULL);
}

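/* Find the task_struct for "pid" without taking a reference. */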
struct task_struct *
linux_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, false));
}

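/* Find the task_struct for "pid" and take a reference on it. */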
struct task_struct *
linux_get_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, true));
}

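/*
 * Return true if the process backing "task" is exiting, that is, has
 * the P_WEXIT flag set.
 */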
bool
linux_task_exiting(struct task_struct *task)
{
	struct thread *td;
	struct proc *p;
	bool ret;

	ret = false;

	/* try to find corresponding thread */
	td = tdfind(task->pid, -1);
	if (td != NULL) {
		p = td->td_proc;
	} else {
		/* try to find the corresponding process */
		p = pfind(task->pid);
	}

	if (p != NULL) {
		if ((p->p_flag & P_WEXIT) != 0)
			ret = true;
		PROC_UNLOCK(p);
	}
	return (ret);
}

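/*
 * Install the allocation hook and register the thread destructor
 * event handler at boot time.
 */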
static void
linux_current_init(void *arg __unused)
{
	lkpi_alloc_current = linux_alloc_current;
	linuxkpi_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
	    linuxkpi_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);
}
SYSINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND, linux_current_init, NULL);

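/*
 * Undo linux_current_init(): release any task_struct still attached
 * to a thread, deregister the thread destructor event handler and
 * restore the no-op allocation hook.
 */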
static void
linux_current_uninit(void *arg __unused)
{
	struct proc *p;
	struct task_struct *ts;
	struct thread *td;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			if ((ts = td->td_lkpi_task) != NULL) {
				td->td_lkpi_task = NULL;
				put_task_struct(ts);
			}
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	EVENTHANDLER_DEREGISTER(thread_dtor, linuxkpi_thread_dtor_tag);
	lkpi_alloc_current = linux_alloc_current_noop;
}
SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND, linux_current_uninit, NULL);