// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES 100
#define STACK_ERR_BUF_SIZE 128

#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_UNDEFINED;

static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu().  This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit().  We cannot rely on the RCU infrastructure
 * to do the synchronization.  Instead, hard force the sched synchronization.
 *
 * This approach allows the RCU functions to be used for manipulating
 * func_stack safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

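/*
 * Why this works: schedule_on_each_cpu() waits for the empty klp_sync()
 * work to run on every CPU, and running a work item forces each CPU
 * through the scheduler.  Any preemption-disabled section that was in
 * flight when klp_synchronize_transition() was called (such as
 * klp_ftrace_handler()) has therefore finished by the time it returns,
 * whether or not RCU was watching it.
 */
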
/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_discard_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

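/*
 * Note that a hit in the [func_addr, func_addr + func_size) range above is
 * reported as -EAGAIN rather than a hard error: the task simply stays in
 * its current patch state and the transition is retried later.
 */
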
/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret < 0) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
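	/*
	 * Holding the task's rq lock means the task cannot be scheduled in
	 * while its stack is examined, so the trace cannot change under us.
	 */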
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here. We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless. It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to all non-kthread tasks which
			 * are still not migrated.
			 */
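			/*
			 * signal_wake_up() only sets TIF_SIGPENDING and wakes
			 * the task; no signal is actually queued.  The task
			 * then finds no pending signal but does pass through
			 * the exit-to-usermode path, where TIF_PATCH_PENDING
			 * is processed.
			 */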
			spin_lock_irq(&task->sighand->siglock);
			signal_wake_up(task, 0);
			spin_unlock_irq(&task->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
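		/*
		 * Retries run roughly once per second (round_jiffies_relative
		 * batches the timer with other whole-second wakeups), and
		 * every SIGNALS_TIMEOUT-th retry re-sends the fake signals.
		 */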
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now clean up the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the patch in
	 * klp_complete_transition(), but that function is also called
	 * from klp_cancel_transition().
	 */
	if (!patch->enabled) {
		klp_free_patch_start(patch);
		schedule_work(&patch->free_work);
	}
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or at the idle loop
	 * switch point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

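/*
 * A rough sketch of how core.c drives an enable transition (disable is
 * symmetric):
 *
 *	klp_init_transition(patch, KLP_PATCHED);
 *	klp_patch_object(obj);		// for each loaded object
 *	klp_start_transition();
 *	klp_try_complete_transition();	// retried via klp_transition_work
 *
 * and eventually klp_complete_transition(), unless the operation is
 * canceled or reversed first.
 */
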
/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on the administrator's request.  This
 * forces an existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one who can execute
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	klp_for_each_patch(patch)
		patch->forced = true;
}
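
/*
 * Note on forcing: once patch->forced is set, the livepatch core never drops
 * the module reference for that patch, so a forced patch module can never be
 * removed.  That is the price for bypassing the consistency-model guarantees
 * above.
 */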