// SPDX-License-Identifier: GPL-2.0
/*
 * EFI capsule support.
 *
 * Copyright 2013 Intel Corporation; author Matt Fleming
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/efi.h>
#include <linux/vmalloc.h>
#include <asm/efi.h>
#include <asm/io.h>

typedef struct {
	u64 length;
	u64 data;
} efi_capsule_block_desc_t;

static bool capsule_pending;
static bool stop_capsules;
static int efi_reset_type = -1;

/*
 * capsule_mutex serialises access to capsule_pending, efi_reset_type
 * and stop_capsules.
 */
static DEFINE_MUTEX(capsule_mutex);

/**
 * efi_capsule_pending - has a capsule been passed to the firmware?
 * @reset_type: store the type of EFI reset if capsule is pending
 *
 * To ensure that the registered capsule is processed correctly by the
 * firmware we need to perform a specific type of reset. If a capsule is
 * pending return the reset type in @reset_type.
 *
 * This function will race with callers of efi_capsule_update(), for
 * example, calling this function while somebody else is in
 * efi_capsule_update() but hasn't reached efi_capsule_update_locked()
 * will miss the updates to capsule_pending and efi_reset_type after
 * efi_capsule_update_locked() completes.
 *
 * A non-racy use is from platform reboot code, because new capsule
 * submissions are stopped (via stop_capsules) before the system
 * reaches SYSTEM_RESTART. See efi_capsule_update_locked().
 */
bool efi_capsule_pending(int *reset_type)
{
	if (!capsule_pending)
		return false;

	if (reset_type)
		*reset_type = efi_reset_type;

	return true;
}

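/*
 * Example (illustrative sketch, not part of this file): platform
 * reboot code can use efi_capsule_pending() to decide whether to
 * force an EFI-style reset; the reboot_type variable and BOOT_EFI
 * value here are x86-flavoured assumptions:
 *
 *	if (efi_capsule_pending(NULL)) {
 *		pr_info("EFI capsule is pending, forcing EFI reboot.\n");
 *		reboot_type = BOOT_EFI;
 *	}
 *
 * This read is stable on the reboot path because stop_capsules has
 * already been set by the reboot notifier below.
 */
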
/*
 * Whitelist of EFI capsule flags that we support.
 *
 * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
 * require us to prepare the kernel for reboot. Refuse to load any
 * capsules with that flag and any other flags that we do not know how
 * to handle.
 */
#define EFI_CAPSULE_SUPPORTED_FLAG_MASK			\
	(EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)

/**
 * efi_capsule_supported - does the firmware support the capsule?
 * @guid: vendor guid of capsule
 * @flags: capsule flags
 * @size: size of capsule data
 * @reset: the reset type required for this capsule
 *
 * Check whether a capsule with @flags is supported by the firmware
 * and that @size doesn't exceed the maximum size for a capsule.
 *
 * No attempt is made to check @reset against the reset type required
 * by any pending capsules because of the races involved.
 */
int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
{
	efi_capsule_header_t capsule;
	efi_capsule_header_t *cap_list[] = { &capsule };
	efi_status_t status;
	u64 max_size;

	if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
		return -EINVAL;

	capsule.headersize = capsule.imagesize = sizeof(capsule);
	memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
	capsule.flags = flags;

	status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
	if (status != EFI_SUCCESS)
		return efi_status_to_err(status);

	if (size > max_size)
		return -ENOSPC;

	return 0;
}
EXPORT_SYMBOL_GPL(efi_capsule_supported);

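/*
 * Example (illustrative sketch): a caller will normally probe firmware
 * support before building the scatter-gather list. "hdr" here is a
 * hypothetical pointer to the caller's capsule header:
 *
 *	int reset, ret;
 *
 *	ret = efi_capsule_supported(hdr->guid, hdr->flags,
 *				    hdr->imagesize, &reset);
 *	if (ret)
 *		return ret;
 *
 * A non-zero return means the flags are unsupported, the firmware
 * rejected the capsule, or the image exceeds the maximum capsule size.
 */
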
/*
 * Every scatter gather list (block descriptor) page must end with a
 * continuation pointer. The last continuation pointer of the last
 * page must be zero to mark the end of the chain.
 */
#define SGLIST_PER_PAGE	((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)

/*
 * How many scatter gather list (block descriptor) pages do we need
 * to map @count pages?
 */
static inline unsigned int sg_pages_num(unsigned int count)
{
	return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
}

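/*
 * Worked example: with 4 KiB pages and 16-byte block descriptors,
 * SGLIST_PER_PAGE is 4096 / 16 - 1 = 255. Mapping a 4 MiB capsule
 * (1024 data pages) therefore needs sg_pages_num(1024) =
 * DIV_ROUND_UP(1024, 255) = 5 scatter-gather pages.
 */
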
/**
 * efi_capsule_update_locked - pass a single capsule to the firmware
 * @capsule: capsule to send to the firmware
 * @sg_pages: array of scatter gather (block descriptor) pages
 * @reset: the reset type required for @capsule
 *
 * This function must be called with capsule_mutex held. It checks
 * whether efi_reset_type conflicts with @reset, and atomically sets
 * efi_reset_type and capsule_pending if the capsule is successfully
 * sent to the firmware.
 *
 * We also check to see if the system is about to restart, and if so,
 * abort. This avoids races between efi_capsule_update() and
 * efi_capsule_pending().
 */
static int
efi_capsule_update_locked(efi_capsule_header_t *capsule,
			  struct page **sg_pages, int reset)
{
	efi_physical_addr_t sglist_phys;
	efi_status_t status;

	lockdep_assert_held(&capsule_mutex);

	/*
	 * If someone has already registered a capsule that requires a
	 * different reset type, we're out of luck and must abort.
	 */
	if (efi_reset_type >= 0 && efi_reset_type != reset) {
		pr_err("Conflicting capsule reset type %d (%d).\n",
		       reset, efi_reset_type);
		return -EINVAL;
	}

	/*
	 * If the system is getting ready to restart it may have
	 * called efi_capsule_pending() to make decisions (such as
	 * whether to force an EFI reboot), and we're racing against
	 * that call. Abort in that case.
	 */
	if (unlikely(stop_capsules)) {
		pr_warn("Capsule update raced with reboot, aborting.\n");
		return -EINVAL;
	}

	sglist_phys = page_to_phys(sg_pages[0]);

	status = efi.update_capsule(&capsule, 1, sglist_phys);
	if (status == EFI_SUCCESS) {
		capsule_pending = true;
		efi_reset_type = reset;
	}

	return efi_status_to_err(status);
}

/**
 * efi_capsule_update - send a capsule to the firmware
 * @capsule: capsule to send to firmware
 * @pages: an array of capsule data pages
 *
 * Build a scatter gather list with EFI capsule block descriptors to
 * map the capsule described by @capsule with its data in @pages and
 * send it to the firmware via the UpdateCapsule() runtime service.
 *
 * @capsule must be a virtual mapping of the complete capsule update in the
 * kernel address space, as the capsule can be consumed immediately.
 * An efi_capsule_header_t that describes the entire contents of the
 * capsule must be at the start of the first data page.
 *
 * Even though this function will validate that the firmware supports
 * the capsule guid, users will likely want to check that
 * efi_capsule_supported() returns 0 before calling this function
 * because it makes it easier to print helpful error messages.
 *
 * If the capsule is successfully submitted to the firmware, any
 * subsequent calls to efi_capsule_pending() will return true. @pages
 * must not be released or modified if this function returns
 * successfully.
 *
 * Callers must be prepared for this function to fail, which can
 * happen if we raced with system reboot or if there is already a
 * pending capsule that has a reset type that conflicts with the one
 * required by @capsule. Do NOT use efi_capsule_pending() to detect
 * this conflict since that would be racy. Instead, submit the capsule
 * to efi_capsule_update() and check the return value.
 *
 * Return 0 on success, a converted EFI status code on failure.
 */
int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
{
	u32 imagesize = capsule->imagesize;
	efi_guid_t guid = capsule->guid;
	unsigned int count, sg_count;
	u32 flags = capsule->flags;
	struct page **sg_pages;
	int rv, reset_type;
	int i, j;

	rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
	if (rv)
		return rv;

	count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
	sg_count = sg_pages_num(count);

	sg_pages = kcalloc(sg_count, sizeof(*sg_pages), GFP_KERNEL);
	if (!sg_pages)
		return -ENOMEM;

	for (i = 0; i < sg_count; i++) {
		sg_pages[i] = alloc_page(GFP_KERNEL);
		if (!sg_pages[i]) {
			rv = -ENOMEM;
			goto out;
		}
	}

	for (i = 0; i < sg_count; i++) {
		efi_capsule_block_desc_t *sglist;

		sglist = kmap_atomic(sg_pages[i]);

		for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
			u64 sz = min_t(u64, imagesize,
				       PAGE_SIZE - (u64)*pages % PAGE_SIZE);

			sglist[j].length = sz;
			sglist[j].data = *pages++;

			imagesize -= sz;
			count--;
		}

		/* Continuation pointer */
		sglist[j].length = 0;

		if (i + 1 == sg_count)
			sglist[j].data = 0;
		else
			sglist[j].data = page_to_phys(sg_pages[i + 1]);

#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
		/*
		 * At runtime, the firmware has no way to find out where the
		 * sglist elements are mapped, if they are mapped in the first
		 * place. Therefore, on architectures that can only perform
		 * cache maintenance by virtual address, the firmware is unable
		 * to perform this maintenance, and so it is up to the OS to do
		 * it instead.
		 */
		efi_capsule_flush_cache_range(sglist, PAGE_SIZE);
#endif
		kunmap_atomic(sglist);
	}

	mutex_lock(&capsule_mutex);
	rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
	mutex_unlock(&capsule_mutex);

out:
	for (i = 0; rv && i < sg_count; i++) {
		if (sg_pages[i])
			__free_page(sg_pages[i]);
	}

	kfree(sg_pages);
	return rv;
}
EXPORT_SYMBOL_GPL(efi_capsule_update);

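/*
 * Example (illustrative sketch): submitting a capsule that has been
 * copied into a physically contiguous, linearly mapped buffer. "buf"
 * and "ret" are hypothetical; for vmalloc()/vmap() backed buffers the
 * physical addresses would come from page_to_phys() on the individual
 * pages instead of virt_to_phys():
 *
 *	efi_capsule_header_t *capsule = (efi_capsule_header_t *)buf;
 *	unsigned int i, n = DIV_ROUND_UP(capsule->imagesize, PAGE_SIZE);
 *	phys_addr_t *pages = kcalloc(n, sizeof(*pages), GFP_KERNEL);
 *
 *	if (!pages)
 *		return -ENOMEM;
 *	for (i = 0; i < n; i++)
 *		pages[i] = virt_to_phys(buf + i * PAGE_SIZE);
 *
 *	ret = efi_capsule_update(capsule, pages);
 *
 * On success the capsule data must be left untouched until the
 * firmware has consumed it across the required reset.
 */
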
static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
{
	mutex_lock(&capsule_mutex);
	stop_capsules = true;
	mutex_unlock(&capsule_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block capsule_reboot_nb = {
	.notifier_call = capsule_reboot_notify,
};

static int __init capsule_reboot_register(void)
{
	return register_reboot_notifier(&capsule_reboot_nb);
}
core_initcall(capsule_reboot_register);