/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES	100
#define STACK_ERR_BUF_SIZE	128

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard forced
 * synchronize_sched().  This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit().  We cannot rely on the RCU infrastructure
 * to do the synchronization; instead we hard force the sched synchronization.
 *
 * This approach allows us to use RCU functions for manipulating
 * func_stack safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

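/*
 * In effect, schedule_on_each_cpu(klp_sync) returns only after every
 * online CPU has scheduled in a worker and run the (empty) klp_sync()
 * work item.  Since klp_ftrace_handler() and klp_update_patch_state()
 * run with preemption disabled, any such call that was in flight when
 * this was invoked has finished by the time it returns, even in code
 * paths where RCU is not watching.
 */
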
/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;
	bool immediate_func = false;

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	if (klp_transition_patch->immediate)
		goto done;

	klp_for_each_object(klp_transition_patch, obj) {
		klp_for_each_func(obj, func) {
			func->transition = false;
			if (func->immediate)
				immediate_func = true;
		}
	}

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

done:
	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	/*
	 * See complementary comment in __klp_enable_patch() for why we
	 * keep the module reference for immediate patches.
	 */
	if (!klp_transition_patch->immediate && !immediate_func &&
	    klp_target_state == KLP_UNPATCHED) {
		module_put(klp_transition_patch->mod);
	}

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

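/*
 * For example: after a normal (non-immediate) patch has been fully
 * reverted, the consistency model guarantees that no task is still
 * executing the patched code, so the module_put() above makes the
 * patch module unloadable again.  With ->immediate set (on the patch
 * or any func), no such guarantee exists, which is why the reference
 * is kept and the module stays pinned.
 */
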
/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

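/*
 * The barrier pairing for case 1 above, roughly:
 *
 *	klp_init_transition()		klp_update_patch_state()
 *	---------------------		------------------------
 *	klp_target_state = state;	test_and_clear_tsk_thread_flag()
 *	smp_wmb();			    (implies smp_rmb)
 *	[TIF_PATCH_PENDING set in	task->patch_state =
 *	 klp_start_transition()]	    READ_ONCE(klp_target_state);
 *
 * If the pending flag is seen, the klp_target_state write is
 * guaranteed to be visible too, so patch_state can never be set to
 * KLP_UNDEFINED here.
 */
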
/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	if (func->immediate)
		return 0;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_addr);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

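/*
 * For example: suppose a task is sleeping at old_addr + 0x40 (a
 * hypothetical offset) inside a to-be-patched function.  That address
 * falls within [func_addr, func_addr + func_size), so the range check
 * above returns -EAGAIN, the task keeps its current patch state, and
 * the transition work retries it later.
 */
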
/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

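/*
 * Note that -ENOSYS from save_stack_trace_tsk_reliable() would mean
 * the architecture doesn't implement reliable stack traces at all,
 * which the klp_have_reliable_stack() check in klp_try_switch_task()
 * should already have ruled out -- hence the WARN_ON_ONCE() above
 * rather than dedicated error handling.
 */
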
/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

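/*
 * Holding the task's rq lock here is what makes the stack walk safe:
 * a task that isn't currently running cannot be scheduled in while the
 * lock is held, so it can't push or pop stack frames while
 * klp_check_stack() reads them, and its patch_state can be flipped
 * without racing against klp_ftrace_handler() on a running task.
 */
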
/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		goto success;

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

success:
	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/* we're done, now cleanup the data structures */
	klp_complete_transition();
}

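/*
 * The retry above re-queues klp_transition_work with a delay of
 * roughly one second (round_jiffies_relative(HZ)), so stragglers are
 * re-examined periodically until they either pass the stack check or
 * switch themselves at a kernel-exit or idle-loop transition point.
 */
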
/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		return;

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}

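/*
 * Setting TIF_PATCH_PENDING only here, after klp_init_transition() has
 * published klp_target_state and executed its smp_wmb(), is what lets
 * klp_update_patch_state() treat a set flag as a license to copy
 * klp_target_state: by the time any task can observe the flag, the
 * target state is guaranteed to be valid.
 */
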
/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (patch->immediate)
		return;

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

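/*
 * The "initial_state = !state" trick above works because KLP_PATCHED
 * and KLP_UNPATCHED are logical complements (klp_reverse_transition()
 * below relies on the same property): enabling a patch starts every
 * task in KLP_UNPATCHED, and disabling one starts every task in
 * KLP_PATCHED.
 */
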
/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

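/*
 * Copying patch_state (and, via setup_thread_stack(), the pending TIF
 * flag) means a task forked mid-transition simply inherits its
 * parent's position in it: a child of an already-switched parent
 * starts switched, while a child of a straggler becomes a straggler
 * that the usual transition machinery will pick up.
 */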