xref: /linux/kernel/scs.c (revision 9beccca0)
// SPDX-License-Identifier: GPL-2.0
/*
 * Shadow Call Stack support.
 *
 * Copyright (C) 2019 Google LLC
 */

#include <linux/cpuhotplug.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>

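/*
 * With CONFIG_DYNAMIC_SCS, shadow call stack support is only switched on
 * if the architecture enables this static key at boot; the scs_is_enabled()
 * checks below then keep the hooks in this file from doing any work on
 * kernels where the key stays disabled.
 */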
#ifdef CONFIG_DYNAMIC_SCS
DEFINE_STATIC_KEY_FALSE(dynamic_scs_enabled);
#endif

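/*
 * Account (+1) or unaccount (-1) one shadow call stack (SCS_SIZE bytes)
 * against the NR_KERNEL_SCS_KB counter of the node the stack lives on.
 */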
static void __scs_account(void *s, int account)
{
	struct page *scs_page = vmalloc_to_page(s);

	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
			    account * (SCS_SIZE / SZ_1K));
}

/* Matches NR_CACHED_STACKS for VMAP_STACK */
#define NR_CACHED_SCS 2
static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);

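/*
 * Try to reuse a stack from the per-CPU cache first and fall back to a
 * fresh vmalloc allocation if the cache is empty. Cached stacks are
 * unpoisoned and cleared before they are handed out again.
 */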
static void *__scs_alloc(int node)
{
	int i;
	void *s;

	for (i = 0; i < NR_CACHED_SCS; i++) {
		s = this_cpu_xchg(scs_cache[i], NULL);
		if (s) {
			s = kasan_unpoison_vmalloc(s, SCS_SIZE,
						   KASAN_VMALLOC_PROT_NORMAL);
			memset(s, 0, SCS_SIZE);
			goto out;
		}
	}

	s = __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
				    GFP_SCS, PAGE_KERNEL, 0, node,
				    __builtin_return_address(0));

out:
	return kasan_reset_tag(s);
}

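/*
 * Allocate and initialise a shadow call stack: set the end magic used for
 * corruption detection, re-poison the area for KASAN and account the memory.
 */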
void *scs_alloc(int node)
{
	void *s;

	s = __scs_alloc(node);
	if (!s)
		return NULL;

	*__scs_magic(s) = SCS_END_MAGIC;

	/*
	 * Poison the allocation to catch unintentional accesses to
	 * the shadow stack when KASAN is enabled.
	 */
	kasan_poison_vmalloc(s, SCS_SIZE);
	__scs_account(s, 1);
	return s;
}

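/*
 * Return a shadow call stack to the per-CPU cache if there is room,
 * otherwise free it; safe to call from interrupt context.
 */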
void scs_free(void *s)
{
	int i;

	__scs_account(s, -1);

	/*
	 * We cannot sleep as this can be called in interrupt context,
	 * so use this_cpu_cmpxchg to update the cache, and vfree_atomic
	 * to free the stack.
	 */

	for (i = 0; i < NR_CACHED_SCS; i++)
		if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
			return;

	kasan_unpoison_vmalloc(s, SCS_SIZE, KASAN_VMALLOC_PROT_NORMAL);
	vfree_atomic(s);
}

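/*
 * CPU hotplug teardown callback: drop any stacks still cached for the
 * CPU that is going away.
 */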
static int scs_cleanup(unsigned int cpu)
{
	int i;
	void **cache = per_cpu_ptr(scs_cache, cpu);

	for (i = 0; i < NR_CACHED_SCS; i++) {
		vfree(cache[i]);
		cache[i] = NULL;
	}

	return 0;
}

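/*
 * Register the hotplug callback so the per-CPU caches are emptied when a
 * CPU is taken offline. Nothing to do when SCS is not enabled.
 */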
void __init scs_init(void)
{
	if (!scs_is_enabled())
		return;
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
			  scs_cleanup);
}

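/*
 * Allocate a shadow call stack for a new task and point both task_scs()
 * and task_scs_sp() at its base. A minimal sketch of the expected usage
 * in the task lifecycle (the real call sites live outside this file,
 * e.g. in the fork/exit paths):
 *
 *	ret = scs_prepare(tsk, node);	// new task gets its shadow stack
 *	if (ret)
 *		return ret;		// -ENOMEM: abort task creation
 *	...
 *	scs_release(tsk);		// at task teardown: check magic, free
 */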
int scs_prepare(struct task_struct *tsk, int node)
{
	void *s;

	if (!scs_is_enabled())
		return 0;

	s = scs_alloc(node);
	if (!s)
		return -ENOMEM;

	task_scs(tsk) = task_scs_sp(tsk) = s;
	return 0;
}

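/*
 * With CONFIG_DEBUG_STACK_USAGE, walk the shadow stack up to the first
 * unused (zero) slot and log a message whenever a task sets a new
 * system-wide high-water mark for shadow stack usage.
 */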
static void scs_check_usage(struct task_struct *tsk)
{
	static unsigned long highest;

	unsigned long *p, prev, curr = highest, used = 0;

	if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE))
		return;

	for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
		if (!READ_ONCE_NOCHECK(*p))
			break;
		used += sizeof(*p);
	}

	while (used > curr) {
		prev = cmpxchg_relaxed(&highest, curr, used);

		if (prev == curr) {
			pr_info("%s (%d): highest shadow stack usage: %lu bytes\n",
				tsk->comm, task_pid_nr(tsk), used);
			break;
		}

		curr = prev;
	}
}

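/*
 * Called when a task is freed: warn if the end magic was overwritten,
 * record the usage high-water mark, and release the stack.
 */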
void scs_release(struct task_struct *tsk)
{
	void *s = task_scs(tsk);

	if (!scs_is_enabled() || !s)
		return;

	WARN(task_scs_end_corrupted(tsk),
	     "corrupted shadow stack detected when freeing task\n");
	scs_check_usage(tsk);
	scs_free(s);
}