/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES	100
#define STACK_ERR_BUF_SIZE	128

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
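/*
 * Hedged sketch (not built; illustration only): the enable path in core.c
 * is expected to drive a transition in roughly this order.  The real caller
 * is __klp_enable_patch(), which handles locking and error unwinding that
 * this example omits.
 */
#if 0
static int example_enable_flow(struct klp_patch *patch)
{
	klp_init_transition(patch, KLP_PATCHED);	/* set target state */

	/* ... klp_patch_object() is called for each loaded object ... */

	klp_start_transition();		/* flag every task as pending */
	klp_try_complete_transition();	/* first attempt; reschedules itself */
	return 0;
}
#endif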
/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;
	bool immediate_func = false;

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		synchronize_rcu();
	}

	if (klp_transition_patch->immediate)
		goto done;

	klp_for_each_object(klp_transition_patch, obj) {
		klp_for_each_func(obj, func) {
			func->transition = false;
			if (func->immediate)
				immediate_func = true;
		}
	}

	if (klp_target_state == KLP_UNPATCHED && !immediate_func)
		module_put(klp_transition_patch->mod);

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		synchronize_rcu();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

done:
	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}
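/*
 * Hedged sketch of the reader that the synchronize_rcu() calls above pair
 * with: klp_ftrace_handler() in patch.c picks the most recent function on
 * ops->func_stack under rcu_read_lock(), roughly as follows (simplified;
 * see patch.c for the real logic).
 */
#if 0
static void example_handler_selection(struct klp_ops *ops)
{
	struct klp_func *func;

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (func && func->transition &&
	    current->patch_state == KLP_UNPATCHED)
		/* task not switched yet: fall back to the older code */
		func = list_next_or_null_rcu(&ops->func_stack,
					     &func->stack_node,
					     struct klp_func, stack_node);
	/* ... jump to func->new_func if func is set ... */
	rcu_read_unlock();
}
#endif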
/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	struct klp_patch *patch = klp_transition_patch;
	struct klp_object *obj;
	struct klp_func *func;
	bool immediate_func = false;

	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();

	/*
	 * In the enable error path, even immediate patches can be safely
	 * removed because the transition hasn't been started yet.
	 *
	 * klp_complete_transition() doesn't have a module_put() for immediate
	 * patches, so do it here.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			if (func->immediate)
				immediate_func = true;

	if (patch->immediate || immediate_func)
		module_put(patch->mod);
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	rcu_read_lock();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	rcu_read_unlock();
}
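/*
 * Hedged example of a call site for klp_update_patch_state(): on arches
 * that support it, the exit-to-usermode path checks TIF_PATCH_PENDING and
 * switches the current task there.  Simplified sketch of what such an arch
 * loop is expected to contain (the real code lives in each arch's entry
 * code and differs in detail):
 */
#if 0
static void example_exit_to_usermode_loop(unsigned long cached_flags)
{
	if (cached_flags & _TIF_PATCH_PENDING)
		klp_update_patch_state(current);
}
#endif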
/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	if (func->immediate)
		return 0;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_addr);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}
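/*
 * The interval test in klp_check_stack_func() above, restated as a
 * standalone predicate for clarity (illustrative only): a stack entry
 * "hits" a function if it lies in [func_addr, func_addr + func_size).
 */
#if 0
static bool example_address_in_func(unsigned long address,
				    unsigned long func_addr,
				    unsigned long func_size)
{
	return address >= func_addr && address < func_addr + func_size;
}
#endif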
/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}
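/*
 * Hedged reminder of what klp_have_reliable_stack() (from the livepatch
 * header) is expected to reduce to, since klp_try_switch_task() below bails
 * out early without it:
 */
#if 0
static inline bool example_have_reliable_stack(void)
{
	return IS_ENABLED(CONFIG_STACKTRACE) &&
	       IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}
#endif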
/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}
/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		goto success;

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

success:
	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/* we're done, now clean up the data structures */
	klp_complete_transition();
}
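/*
 * Descriptive note on the retry cadence above: round_jiffies_relative(HZ)
 * rounds the roughly one-second delay to a whole-second jiffy boundary so
 * timer wakeups batch together; klp_transition_work_fn() then re-runs
 * klp_try_complete_transition() about once per second until every task has
 * switched or the transition is reversed.
 */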
/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		return;

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}
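/*
 * Hedged reminder (values from include/linux/livepatch.h) of why the
 * "initial_state = !state" trick in klp_init_transition() below works:
 * KLP_PATCHED and KLP_UNPATCHED are logical complements, and KLP_UNDEFINED
 * is distinct from both.  Example names below are hypothetical stand-ins
 * for the real macros:
 */
#if 0
#define EXAMPLE_KLP_UNDEFINED	(-1)
#define EXAMPLE_KLP_UNPATCHED	 0
#define EXAMPLE_KLP_PATCHED	 1
#endif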
/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (patch->immediate)
		return;

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}
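/*
 * Illustrative pairing (a sketch, not extra synchronization): the smp_wmb()
 * in klp_init_transition() above is paired with the implied read barrier of
 * test_and_clear_tsk_thread_flag() in klp_update_patch_state():
 *
 *	CPU 0 (init + start)			CPU 1 (update)
 *	task->patch_state = initial_state
 *	klp_target_state = state
 *	smp_wmb()
 *	set_tsk_thread_flag(task,		if (test_and_clear_tsk_thread_flag(
 *	    TIF_PATCH_PENDING)			        task, TIF_PATCH_PENDING))
 *						    (implies smp_rmb())
 *						    task->patch_state =
 *						        READ_ONCE(klp_target_state);
 *
 * If CPU 1 observes TIF_PATCH_PENDING set, it is therefore guaranteed to
 * read the updated klp_target_state, never KLP_UNDEFINED.
 */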
/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	synchronize_rcu();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}
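/*
 * Hedged sketch of the klp_copy_process() call site: copy_process() in
 * kernel/fork.c is expected to call it once the child's thread flags have
 * been set up, roughly as follows:
 */
#if 0
	/* somewhere in copy_process(), for the new task p: */
	klp_copy_process(p);
#endif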