// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data.  All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module still can be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
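	/*
	 * Illustrative example (editor's note): with sympos == 0 the lookup
	 * below succeeds only if "name" occurs exactly once in the object;
	 * with sympos == 2 it succeeds only if the symbol occurs at least
	 * twice and then resolves to the second occurrence.
	 */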
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
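		/* e.g. ".klp.sym.vmlinux.printk,0" (illustrative example) */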
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
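		/* e.g. ".klp.rela.ext4.text.unlikely" (illustrative example) */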
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
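/*
 * Illustrative usage from user space (editor's addition, paths as above):
 *
 *	# echo 0 > /sys/kernel/livepatch/<patch>/enabled   disable, or reverse a
 *	                                                   pending enable transition
 *	# cat /sys/kernel/livepatch/<patch>/transition     prints 1 while a
 *	                                                   transition is in progress
 *	# echo 1 > /sys/kernel/livepatch/<patch>/force     force the pending
 *	                                                   transition to complete
 */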
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow a pending transition to be reversed in either direction. It
	 * might be necessary to complete the transition without forcing it
	 * and breaking the system integrity.
	 *
	 * Do not allow a disabled patch to be re-enabled.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(). And it has to be
 * the last function accessing the livepatch structures when the patch
 * gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch itself. This work item allows the wait for the interface to be
 * destroyed to run in a separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
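	/* e.g. sympos 2 for "foo" yields a "foo,2" directory (illustrative) */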
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	mutex_lock(&text_mutex);

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		mutex_unlock(&text_mutex);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	mutex_unlock(&text_mutex);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}

	if (!try_module_get(patch->mod))
		return -ENODEV;

	return 0;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	ret = klp_init_patch_early(patch);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
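
/*
 * Illustrative sketch (editor's addition, modeled on
 * samples/livepatch/livepatch-sample.c): a minimal livepatch module that
 * replaces one vmlinux function and enables the patch from module_init().
 * The names cmdline_proc_show and livepatch_cmdline_proc_show are only
 * examples.
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			// name == NULL means vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 *	MODULE_LICENSE("GPL");
 *	MODULE_INFO(livepatch, "Y");
 */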

/*
 * This function removes replaced patches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessible. All functions are redirected
 * by the klp_transition_patch. They either use the new code or stay in the
 * original code because of the special nop function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. Also
 * this is handled transparently by patch->module_put.
 */
void klp_discard_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
		klp_free_patch_start(old_patch);
		schedule_work(&old_patch->free_work);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for the unnecessary delay added by the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);