/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub used to implement a hard force of
 * synchronize_rcu(). It requires synchronizing tasks even in userspace
 * and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization, so instead we hard force the sched
 * synchronization.
 *
 * This approach allows RCU functions to be used for manipulating
 * func_stack safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

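/*
 * A note on why this works (added explanation, not original commentary):
 * schedule_on_each_cpu() queues the empty klp_sync() work on every online
 * CPU and waits for all of them to run it.  Since each CPU must context
 * switch into its workqueue worker, this acts as a scheduler-level grace
 * period that covers even code where RCU is not watching.  A typical
 * pairing with func_stack manipulation might look like this simplified
 * sketch, based on klp_unpatch_func() in patch.c:
 *
 *	list_del_rcu(&func->stack_node);
 *	klp_synchronize_transition();
 *	(now no klp_ftrace_handler() can still be walking over 'func')
 */
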
/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_discard_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patch state of the given task so that it uses the set of
 * functions associated with the target patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

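/*
 * Where this gets called from (added note; architecture-specific details
 * may vary): besides the calls in this file, klp_update_patch_state(current)
 * is expected to run from the exit-to-usermode path when TIF_PATCH_PENDING
 * is set, and from the idle loop.  Those are the "kernel exit" and "idle
 * loop switch point" referred to in comments elsewhere in this file.
 */
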
/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

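/*
 * Worked example (illustrative values only, not from the original source):
 * when patching, suppose the currently-live function body occupies
 * [0xffffffffa0000000, 0xffffffffa0000100).  A saved stack entry of
 * 0xffffffffa0000042 falls inside that range, so the task is still
 * executing (or sleeping in) the old code and klp_check_stack_func()
 * returns -EAGAIN; an entry outside the range leaves the task switchable.
 */
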
/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret < 0) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

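/*
 * Note (added observation, not original commentary): the 'entries' buffer
 * is static to keep the roughly 800-byte array off the kernel stack.
 * Sharing it is safe only because all callers run under klp_mutex, which
 * serializes klp_check_stack() invocations.
 */
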
/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	static char err_buf[STACK_ERR_BUF_SIZE];
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

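/*
 * Why the rq lock (added explanation, not original commentary): holding the
 * task's runqueue lock pins its scheduling state, so a task observed as not
 * running cannot be scheduled in and start executing while its stack is
 * being examined and its patch_state updated.
 */
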
/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here. We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could have migrated itself, making the
		 * action meaningless. It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to all non-kthread tasks which
			 * are still not migrated.
			 */
			spin_lock_irq(&task->sighand->siglock);
			signal_wake_up(task, 0);
			spin_unlock_irq(&task->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}

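/*
 * How the fake signal works (added explanation, to the best of our
 * understanding): signal_wake_up() sets TIF_SIGPENDING and kicks the task
 * without queueing any real signal.  The task then drops out of its
 * interruptible sleep and passes through the signal-delivery / kernel-exit
 * path, where no signal is found but the pending patch-state transition
 * can be performed.  Userspace never observes an actual signal.
 */
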
/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the patch in
	 * klp_complete_transition() but it is called also
	 * from klp_cancel_transition().
	 */
	if (!patch->enabled) {
		klp_free_patch_start(patch);
		schedule_work(&patch->free_work);
	}
}

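/*
 * Retry cadence (added note, derived from the code above): the delayed work
 * re-runs this function about once per second (round_jiffies_relative(HZ)),
 * and with SIGNALS_TIMEOUT of 15 the fake signals in klp_send_signals() are
 * sent roughly every 15 seconds until all stragglers have transitioned.
 */
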
/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

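/*
 * Typical enable sequence (simplified sketch, based on __klp_enable_patch()
 * in core.c; error handling and callbacks omitted):
 *
 *	klp_init_transition(patch, KLP_PATCHED);
 *	klp_for_each_object(patch, obj)
 *		if (klp_is_object_loaded(obj))
 *			klp_patch_object(obj);
 *	klp_start_transition();
 *	klp_try_complete_transition();
 */
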
/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'. This is not the case here and the consistency model could be
 * broken. The administrator, who is the only one who can invoke
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	klp_for_each_patch(patch)
		patch->forced = true;
}
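
/*
 * Note on 'forced' (added observation, not original commentary): marking
 * every patch as forced makes the livepatch core keep a permanent reference
 * on the patch modules, so they can never be removed.  Since a forced
 * transition may have violated the consistency model, freeing the patched
 * code could no longer be proven safe.
 */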