/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES	100
#define STACK_ERR_BUF_SIZE	128

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub used to implement a hard force of
 * synchronize_rcu(), which requires synchronizing tasks even in
 * userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit().  We cannot rely on the RCU infrastructure
 * to do the synchronization.  Instead, hard-force the scheduler-based
 * synchronization.
 *
 * This approach also allows the func_stack lists to be manipulated
 * safely with the RCU functions.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}
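/*
 * Illustration of the guarantee above: schedule_on_each_cpu() queues the
 * empty klp_sync() work on every online CPU and waits for all of them to
 * finish.  Both klp_ftrace_handler() and klp_update_patch_state() run their
 * critical sections with preemption disabled (notrace variants), so once
 * every CPU has scheduled the work, any such critical section that began
 * before the call has exited.  This gives the effect of synchronize_rcu()
 * while also covering contexts where RCU is not watching.
 */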
/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}
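	/*
	 * At this point every task's patch_state equals klp_target_state, so
	 * klp_ftrace_handler() no longer needs the per-task transition logic
	 * to decide between the old and new function.  The transition flags
	 * can therefore be cleared for both the patching and unpatching
	 * direction.
	 */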
	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}
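/*
 * A sketch of one ops->func_stack while a second patch of the same function
 * is in transition (hypothetical names, newest entry first):
 *
 *	ops->func_stack: new_func_v2 (transition) -> new_func_v1
 *	func->old_func:  the original function in vmlinux or a module
 *
 * klp_ftrace_handler() normally redirects to the first entry.  While
 * func->transition is set, it consults the task's patch_state: KLP_PATCHED
 * tasks get the first entry, KLP_UNPATCHED tasks get the next older one,
 * or the original function if the stack is singular.
 */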
/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}
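		/*
		 * If any return address on the stack falls inside
		 * [func_addr, func_addr + func_size), the task is still using
		 * this function and cannot be switched without breaking the
		 * consistency model; -EAGAIN tells the caller to retry later.
		 */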
		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
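	/*
	 * save_stack_trace_tsk_reliable() is only implemented on
	 * architectures with HAVE_RELIABLE_STACKTRACE; the weak fallback
	 * returns -ENOSYS, which should never be seen here.  Any other error
	 * means the stack could not be walked reliably, so the task has to
	 * be retried later.
	 */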
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
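	/*
	 * Holding the task's rq lock keeps it off a CPU while its stack is
	 * examined: a task that cannot be scheduled cannot start running a
	 * to-be-patched or to-be-unpatched function mid-check.
	 */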
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
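	/*
	 * Idle tasks never leave the kernel, so the kernel-exit hook can
	 * never switch them.  They are either switched here, while parked
	 * with a reliable stack, or at the safe point in the idle loop where
	 * klp_update_patch_state() is called.
	 */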
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the patch in
	 * klp_complete_transition() but it is called also
	 * from klp_cancel_transition().
	 */
	if (!patch->enabled) {
		klp_free_patch_start(patch);
		schedule_work(&patch->free_work);
	}
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
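	/*
	 * KLP_PATCHED and KLP_UNPATCHED are complementary values (1 and 0),
	 * so the state every task starts from is simply the negation of the
	 * state being transitioned to.  KLP_UNDEFINED (-1) is never passed
	 * in here.
	 */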
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");
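	/*
	 * The patch's 'enabled' flag tracks the direction the patch is
	 * headed rather than the transition's completion, so reversing the
	 * target state also means flipping the flag back.
	 */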
	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.  Only an admin can
 * currently request this action.
 */
void klp_send_signals(void)
{
	struct task_struct *g, *task;

	pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless.  It is not serious though.
		 */
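		/*
		 * A fake signal sets TIF_SIGPENDING without queueing a real
		 * signal: it interrupts an interruptible sleep or a syscall
		 * so the task passes through the signal/kernel-exit path,
		 * where its patch state gets updated, and is then discarded.
		 * Kthreads never return to userspace, so they are simply
		 * woken instead.
		 */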
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to all non-kthread tasks which
			 * have not been migrated yet.
			 */
			spin_lock_irq(&task->sighand->siglock);
			signal_wake_up(task, 0);
			spin_unlock_irq(&task->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one who can execute
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	list_for_each_entry(patch, &klp_patches, list)
		patch->forced = true;
}