xref: /linux/drivers/virt/coco/sev-guest/sev-guest.c (revision 1e525507)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Secure Encrypted Virtualization (SEV) guest driver interface
4  *
5  * Copyright (C) 2021 Advanced Micro Devices, Inc.
6  *
7  * Author: Brijesh Singh <brijesh.singh@amd.com>
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/mutex.h>
14 #include <linux/io.h>
15 #include <linux/platform_device.h>
16 #include <linux/miscdevice.h>
17 #include <linux/set_memory.h>
18 #include <linux/fs.h>
19 #include <linux/tsm.h>
20 #include <crypto/aead.h>
21 #include <linux/scatterlist.h>
22 #include <linux/psp-sev.h>
23 #include <linux/sockptr.h>
24 #include <linux/cleanup.h>
25 #include <linux/uuid.h>
26 #include <uapi/linux/sev-guest.h>
27 #include <uapi/linux/psp-sev.h>
28 
29 #include <asm/svm.h>
30 #include <asm/sev.h>
31 
32 #include "sev-guest.h"
33 
34 #define DEVICE_NAME	"sev-guest"
35 #define AAD_LEN		48
36 #define MSG_HDR_VER	1
37 
38 #define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
39 #define SNP_REQ_RETRY_DELAY		(2*HZ)
40 
/*
 * AES-GCM transform (keyed with the VMPCK) plus the scratch buffers used
 * to encrypt/decrypt guest messages exchanged with the ASP.
 */
struct snp_guest_crypto {
	struct crypto_aead *tfm;	/* "gcm(aes)" transform, see init_crypto() */
	u8 *iv, *authtag;		/* scratch IV and authentication-tag buffers */
	int iv_len, a_len;		/* sizes of the above, queried from the tfm */
};
46 
/* Per-device state, allocated in sev_guest_probe(). */
struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;		/* the /dev/sev-guest node */

	void *certs_data;		/* shared buffer for extended-report certificates */
	struct snp_guest_crypto *crypto;
	/* request and response are in unencrypted memory */
	struct snp_guest_msg *request, *response;

	/*
	 * Avoid information leakage by double-buffering shared messages
	 * in fields that are in regular encrypted memory.
	 */
	struct snp_guest_msg secret_request, secret_response;

	struct snp_secrets_page *secrets;	/* mapped SNP secrets page */
	struct snp_req_data input;		/* GPAs handed to the firmware call */
	/* Scratch space for decoded ioctl request payloads (one at a time). */
	union {
		struct snp_report_req report;
		struct snp_derived_key_req derived_key;
		struct snp_ext_report_req ext_report;
	} req;
	u32 *os_area_msg_seqno;	/* seqno slot in the secrets page for vmpck_id */
	u8 *vmpck;		/* active VMPCK; NULL once snp_disable_vmpck() ran */
};
72 
73 static u32 vmpck_id;
74 module_param(vmpck_id, uint, 0444);
75 MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
76 
77 /* Mutex to serialize the shared buffer access and command handling. */
78 static DEFINE_MUTEX(snp_cmd_mutex);
79 
is_vmpck_empty(struct snp_guest_dev * snp_dev)80 static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
81 {
82 	char zero_key[VMPCK_KEY_LEN] = {0};
83 
84 	if (snp_dev->vmpck)
85 		return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN);
86 
87 	return true;
88 }
89 
90 /*
91  * If an error is received from the host or AMD Secure Processor (ASP) there
92  * are two options. Either retry the exact same encrypted request or discontinue
93  * using the VMPCK.
94  *
95  * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
96  * encrypt the requests. The IV for this scheme is the sequence number. GCM
97  * cannot tolerate IV reuse.
98  *
99  * The ASP FW v1.51 only increments the sequence numbers on a successful
100  * guest<->ASP back and forth and only accepts messages at its exact sequence
101  * number.
102  *
103  * So if the sequence number were to be reused the encryption scheme is
104  * vulnerable. If the sequence number were incremented for a fresh IV the ASP
105  * will reject the request.
106  */
/*
 * Wipe the in-use VMPCK and mark it disabled so no further request (which
 * would reuse an AES-GCM IV, see the block comment above) can be built.
 */
static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
{
	dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
		  vmpck_id);
	/* memzero_explicit() so the key wipe cannot be optimized away. */
	memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
	snp_dev->vmpck = NULL;
}
114 
__snp_get_msg_seqno(struct snp_guest_dev * snp_dev)115 static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
116 {
117 	u64 count;
118 
119 	lockdep_assert_held(&snp_cmd_mutex);
120 
121 	/* Read the current message sequence counter from secrets pages */
122 	count = *snp_dev->os_area_msg_seqno;
123 
124 	return count + 1;
125 }
126 
127 /* Return a non-zero on success */
snp_get_msg_seqno(struct snp_guest_dev * snp_dev)128 static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
129 {
130 	u64 count = __snp_get_msg_seqno(snp_dev);
131 
132 	/*
133 	 * The message sequence counter for the SNP guest request is a  64-bit
134 	 * value but the version 2 of GHCB specification defines a 32-bit storage
135 	 * for it. If the counter exceeds the 32-bit value then return zero.
136 	 * The caller should check the return value, but if the caller happens to
137 	 * not check the value and use it, then the firmware treats zero as an
138 	 * invalid number and will fail the  message request.
139 	 */
140 	if (count >= UINT_MAX) {
141 		dev_err(snp_dev->dev, "request message sequence counter overflow\n");
142 		return 0;
143 	}
144 
145 	return count;
146 }
147 
/* Advance the stored sequence counter after a round-trip with the ASP. */
static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
{
	/*
	 * The counter is also incremented by the PSP, so increment it by 2
	 * and save in secrets page.
	 */
	*snp_dev->os_area_msg_seqno += 2;
}
156 
to_snp_dev(struct file * file)157 static inline struct snp_guest_dev *to_snp_dev(struct file *file)
158 {
159 	struct miscdevice *dev = file->private_data;
160 
161 	return container_of(dev, struct snp_guest_dev, misc);
162 }
163 
/*
 * Allocate an AES-GCM transform keyed with @key (the VMPCK) plus IV and
 * authtag scratch buffers sized from the transform. Returns NULL on any
 * failure; partially-initialized state is unwound via the goto chain.
 */
static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen)
{
	struct snp_guest_crypto *crypto;

	crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT);
	if (!crypto)
		return NULL;

	crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(crypto->tfm))
		goto e_free;

	if (crypto_aead_setkey(crypto->tfm, key, keylen))
		goto e_free_crypto;

	crypto->iv_len = crypto_aead_ivsize(crypto->tfm);
	crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT);
	if (!crypto->iv)
		goto e_free_crypto;

	/* Cap the authtag at what the message header can carry. */
	if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) {
		if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) {
			dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN);
			goto e_free_iv;
		}
	}

	crypto->a_len = crypto_aead_authsize(crypto->tfm);
	crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT);
	if (!crypto->authtag)
		goto e_free_iv;

	return crypto;

e_free_iv:
	kfree(crypto->iv);
e_free_crypto:
	crypto_free_aead(crypto->tfm);
e_free:
	kfree(crypto);

	return NULL;
}
207 
deinit_crypto(struct snp_guest_crypto * crypto)208 static void deinit_crypto(struct snp_guest_crypto *crypto)
209 {
210 	crypto_free_aead(crypto->tfm);
211 	kfree(crypto->iv);
212 	kfree(crypto->authtag);
213 	kfree(crypto);
214 }
215 
/*
 * Perform one synchronous AES-GCM operation on a guest message.
 * The header fields starting at 'algo' form the AAD, @src_buf/@dst_buf
 * hold hdr->msg_sz bytes of plaintext/ciphertext, and hdr->authtag carries
 * the tag. The caller must have loaded the IV into crypto->iv beforehand.
 * @enc selects encryption (true) or decryption (false). Returns 0 or a
 * negative crypto-layer error (e.g. -EBADMSG on tag mismatch).
 */
static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg,
			   u8 *src_buf, u8 *dst_buf, size_t len, bool enc)
{
	struct snp_guest_msg_hdr *hdr = &msg->hdr;
	struct scatterlist src[3], dst[3];
	DECLARE_CRYPTO_WAIT(wait);
	struct aead_request *req;
	int ret;

	req = aead_request_alloc(crypto->tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/*
	 * AEAD memory operations:
	 * +------ AAD -------+------- DATA -----+---- AUTHTAG----+
	 * |  msg header      |  plaintext       |  hdr->authtag  |
	 * | bytes 30h - 5Fh  |    or            |                |
	 * |                  |   cipher         |                |
	 * +------------------+------------------+----------------+
	 */
	sg_init_table(src, 3);
	sg_set_buf(&src[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&src[1], src_buf, hdr->msg_sz);
	sg_set_buf(&src[2], hdr->authtag, crypto->a_len);

	sg_init_table(dst, 3);
	sg_set_buf(&dst[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&dst[1], dst_buf, hdr->msg_sz);
	sg_set_buf(&dst[2], hdr->authtag, crypto->a_len);

	aead_request_set_ad(req, AAD_LEN);
	aead_request_set_tfm(req, crypto->tfm);
	aead_request_set_callback(req, 0, crypto_req_done, &wait);

	/* Block until the (possibly async) crypto operation completes. */
	aead_request_set_crypt(req, src, dst, len, crypto->iv);
	ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait);

	aead_request_free(req);
	return ret;
}
257 
/*
 * Encrypt @plaintext into msg->payload. The GCM IV is the zero-padded
 * 64-bit message sequence number taken from the message header.
 */
static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
			 void *plaintext, size_t len)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;

	/* Build IV from the request sequence number (zero-padded). */
	memset(crypto->iv, 0, crypto->iv_len);
	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));

	return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true);
}
269 
/*
 * Decrypt msg->payload into @plaintext, using the response message's own
 * sequence number (zero-padded) as the GCM IV. Tag verification failures
 * surface as a negative return from enc_dec_message().
 */
static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
		       void *plaintext, size_t len)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;

	/* Build IV with response buffer sequence number */
	memset(crypto->iv, 0, crypto->iv_len);
	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));

	return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false);
}
282 
/*
 * Snapshot the host-written response from shared memory, validate its
 * header against the request that was sent, and decrypt the payload into
 * @payload (of size @sz). Returns 0 or -EBADMSG on any header/auth
 * mismatch.
 *
 * Fix: the debug log used to run before the memcpy() from shared memory,
 * so it printed the stale header of the *previous* response. Copy first,
 * then log and validate, so everything below operates on a stable snapshot
 * the host can no longer modify.
 */
static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg *resp = &snp_dev->secret_response;
	struct snp_guest_msg *req = &snp_dev->secret_request;
	struct snp_guest_msg_hdr *req_hdr = &req->hdr;
	struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;

	/* Copy response from shared memory to encrypted memory. */
	memcpy(resp, snp_dev->response, sizeof(*resp));

	dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
		resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);

	/* Verify that the sequence counter is incremented by 1 */
	if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
		return -EBADMSG;

	/* Verify response message type and version number. */
	if (resp_hdr->msg_type != (req_hdr->msg_type + 1) ||
	    resp_hdr->msg_version != req_hdr->msg_version)
		return -EBADMSG;

	/*
	 * If the message size is greater than our buffer length then return
	 * an error.
	 */
	if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz))
		return -EBADMSG;

	/* Decrypt the payload */
	return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len);
}
316 
/*
 * Build the request message in the private secret_request buffer: fill in
 * the header, then encrypt @payload into its payload area with @seqno as
 * the AES-GCM IV. A zero sequence number is rejected with -ENOSR since the
 * firmware treats zero as invalid (defense in depth; the caller already
 * checks the seqno too).
 */
static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
			void *payload, size_t sz)
{
	struct snp_guest_msg *req = &snp_dev->secret_request;
	struct snp_guest_msg_hdr *hdr = &req->hdr;

	memset(req, 0, sizeof(*req));

	hdr->algo = SNP_AEAD_AES_256_GCM;
	hdr->hdr_version = MSG_HDR_VER;
	hdr->hdr_sz = sizeof(*hdr);
	hdr->msg_type = type;
	hdr->msg_version = version;
	hdr->msg_seqno = seqno;
	hdr->msg_vmpck = vmpck_id;
	hdr->msg_sz = sz;

	/* Verify the sequence number is non-zero */
	if (!hdr->msg_seqno)
		return -ENOSR;

	dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n",
		hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);

	return __enc_payload(snp_dev, req, payload, sz);
}
343 
/*
 * Issue the already-encrypted request to the firmware and manage retries.
 * Two retry cases exist: an extended request whose certificate buffer was
 * too small (-ENOSPC) is reissued as a plain request so the sequence
 * number still advances, and a throttled request (-EAGAIN) is retried for
 * up to SNP_REQ_MAX_RETRY_DURATION. On return the sequence counter has
 * been advanced; any remaining error leads the caller to wipe the VMPCK.
 */
static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
				  struct snp_guest_request_ioctl *rio)
{
	unsigned long req_start = jiffies;
	unsigned int override_npages = 0;
	u64 override_err = 0;
	int rc;

retry_request:
	/*
	 * Call firmware to process the request. In this function the encrypted
	 * message enters shared memory with the host. So after this call the
	 * sequence number must be incremented or the VMPCK must be deleted to
	 * prevent reuse of the IV.
	 */
	rc = snp_issue_guest_request(exit_code, &snp_dev->input, rio);
	switch (rc) {
	case -ENOSPC:
		/*
		 * If the extended guest request fails due to having too
		 * small of a certificate data buffer, retry the same
		 * guest request without the extended data request in
		 * order to increment the sequence number and thus avoid
		 * IV reuse.
		 */
		override_npages = snp_dev->input.data_npages;
		exit_code	= SVM_VMGEXIT_GUEST_REQUEST;

		/*
		 * Override the error to inform callers the given extended
		 * request buffer size was too small and give the caller the
		 * required buffer size.
		 */
		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);

		/*
		 * If this call to the firmware succeeds, the sequence number can
		 * be incremented allowing for continued use of the VMPCK. If
		 * there is an error reflected in the return value, this value
		 * is checked further down and the result will be the deletion
		 * of the VMPCK and the error code being propagated back to the
		 * user as an ioctl() return code.
		 */
		goto retry_request;

	/*
	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
	 * throttled. Retry in the driver to avoid returning and reusing the
	 * message sequence number on a different message.
	 */
	case -EAGAIN:
		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
			rc = -ETIMEDOUT;
			break;
		}
		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
		goto retry_request;
	}

	/*
	 * Increment the message sequence number. There is no harm in doing
	 * this now because decryption uses the value stored in the response
	 * structure and any failure will wipe the VMPCK, preventing further
	 * use anyway.
	 */
	snp_inc_msg_seqno(snp_dev);

	if (override_err) {
		rio->exitinfo2 = override_err;

		/*
		 * If an extended guest request was issued and the supplied certificate
		 * buffer was not large enough, a standard guest request was issued to
		 * prevent IV reuse. If the standard request was successful, return -EIO
		 * back to the caller as would have originally been returned.
		 */
		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			rc = -EIO;
	}

	/* Restore the caller's page count so the required size is reported. */
	if (override_npages)
		snp_dev->input.data_npages = override_npages;

	return rc;
}
429 
/*
 * Full request/response round-trip: encrypt @req_buf, hand it to the ASP
 * via __handle_guest_request(), then verify and decrypt the response into
 * @resp_buf. Any unexpected firmware or decode failure wipes the VMPCK so
 * the AES-GCM IV (sequence number) can never be reused.
 */
static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
				struct snp_guest_request_ioctl *rio, u8 type,
				void *req_buf, size_t req_sz, void *resp_buf,
				u32 resp_sz)
{
	u64 seqno;
	int rc;

	/* Get message sequence and verify that its a non-zero */
	seqno = snp_get_msg_seqno(snp_dev);
	if (!seqno)
		return -EIO;

	/* Clear shared memory's response for the host to populate. */
	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));

	/* Encrypt the userspace provided payload in snp_dev->secret_request. */
	rc = enc_payload(snp_dev, seqno, rio->msg_version, type, req_buf, req_sz);
	if (rc)
		return rc;

	/*
	 * Write the fully encrypted request to the shared unencrypted
	 * request page.
	 */
	memcpy(snp_dev->request, &snp_dev->secret_request,
	       sizeof(snp_dev->secret_request));

	rc = __handle_guest_request(snp_dev, exit_code, rio);
	if (rc) {
		/*
		 * INVALID_LEN is the expected "certificate buffer too small"
		 * outcome — the sequence number already advanced, so the
		 * VMPCK stays usable.
		 */
		if (rc == -EIO &&
		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			return rc;

		dev_alert(snp_dev->dev,
			  "Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
			  rc, rio->exitinfo2);

		snp_disable_vmpck(snp_dev);
		return rc;
	}

	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
	if (rc) {
		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
		snp_disable_vmpck(snp_dev);
		return rc;
	}

	return 0;
}
481 
/*
 * Request/response buffer handles for get_ext_report(). sockptr_t lets the
 * same code path serve both the user-space ioctl and kernel-internal
 * (configfs-tsm) callers.
 */
struct snp_req_resp {
	sockptr_t req_data;
	sockptr_t resp_data;
};
486 
/*
 * SNP_GET_REPORT handler: request an attestation report (SNP_MSG_REPORT_REQ)
 * from the ASP and copy it back to the user buffer in @arg->resp_data.
 * Must be called with snp_cmd_mutex held.
 */
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_report_req *req = &snp_dev->req.report;
	struct snp_report_resp *resp;
	int rc, resp_len;

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
		return -EFAULT;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
				  SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
				  resp_len);
	if (rc)
		goto e_free;

	if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
		rc = -EFAULT;

e_free:
	kfree(resp);
	return rc;
}
525 
/*
 * SNP_GET_DERIVED_KEY handler: ask the ASP for a key derived from the
 * VMPCK (SNP_MSG_KEY_REQ). The plaintext result is key material, so the
 * on-stack staging buffer and response struct are explicitly wiped before
 * returning. Must be called with snp_cmd_mutex held.
 */
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_derived_key_req *req = &snp_dev->req.derived_key;
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_derived_key_resp resp = {0};
	int rc, resp_len;
	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
	u8 buf[64 + 16];

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp.data) + crypto->a_len;
	if (sizeof(buf) < resp_len)
		return -ENOMEM;

	if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
		return -EFAULT;

	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
				  SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
	if (rc)
		return rc;

	memcpy(resp.data, buf, sizeof(resp.data));
	if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp)))
		rc = -EFAULT;

	/* The response buffer contains the sensitive data, explicitly clear it. */
	memzero_explicit(buf, sizeof(buf));
	memzero_explicit(&resp, sizeof(resp));
	return rc;
}
566 
/*
 * SNP_GET_EXT_REPORT handler: like get_report(), but additionally requests
 * the certificate blob needed to verify the report. @io carries sockptr_t
 * buffers so both the ioctl path (user pointers) and configfs-tsm (kernel
 * pointers) can call in. If the caller's certificate buffer is too small,
 * the required length is written back into req->certs_len and the request
 * fails with the firmware's INVALID_LEN error. Must hold snp_cmd_mutex.
 */
static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
			  struct snp_req_resp *io)

{
	struct snp_ext_report_req *req = &snp_dev->req.ext_report;
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_report_resp *resp;
	int ret, npages = 0, resp_len;
	sockptr_t certs_address;

	lockdep_assert_held(&snp_cmd_mutex);

	if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
		return -EINVAL;

	if (copy_from_sockptr(req, io->req_data, sizeof(*req)))
		return -EFAULT;

	/* caller does not want certificate data */
	if (!req->certs_len || !req->certs_address)
		goto cmd;

	if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
	    !IS_ALIGNED(req->certs_len, PAGE_SIZE))
		return -EINVAL;

	/* A kernel caller's cert destination is a kernel pointer too. */
	if (sockptr_is_kernel(io->resp_data)) {
		certs_address = KERNEL_SOCKPTR((void *)req->certs_address);
	} else {
		certs_address = USER_SOCKPTR((void __user *)req->certs_address);
		if (!access_ok(certs_address.user, req->certs_len))
			return -EFAULT;
	}

	/*
	 * Initialize the intermediate buffer with all zeros. This buffer
	 * is used in the guest request message to get the certs blob from
	 * the host. If host does not supply any certs in it, then copy
	 * zeros to indicate that certificate data was not provided.
	 */
	memset(snp_dev->certs_data, 0, req->certs_len);
	npages = req->certs_len >> PAGE_SHIFT;
cmd:
	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	snp_dev->input.data_npages = npages;
	ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
				   SNP_MSG_REPORT_REQ, &req->data,
				   sizeof(req->data), resp->data, resp_len);

	/* If certs length is invalid then copy the returned length */
	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
		req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;

		if (copy_to_sockptr(io->req_data, req, sizeof(*req)))
			ret = -EFAULT;
	}

	if (ret)
		goto e_free;

	/* certs_address is only read when npages != 0, i.e. was initialized. */
	if (npages && copy_to_sockptr(certs_address, snp_dev->certs_data, req->certs_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	if (copy_to_sockptr(io->resp_data, resp, sizeof(*resp)))
		ret = -EFAULT;

e_free:
	kfree(resp);
	return ret;
}
648 
/*
 * ioctl entry point: validates the request header, serializes all guest
 * requests under snp_cmd_mutex and dispatches to the per-command handler.
 * exitinfo2 (primed with 0xff) is copied back to userspace even when the
 * command fails, so callers can inspect the firmware error code.
 */
static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	struct snp_guest_dev *snp_dev = to_snp_dev(file);
	void __user *argp = (void __user *)arg;
	struct snp_guest_request_ioctl input;
	struct snp_req_resp io;
	int ret = -ENOTTY;

	if (copy_from_user(&input, argp, sizeof(input)))
		return -EFAULT;

	input.exitinfo2 = 0xff;

	/* Message version must be non-zero */
	if (!input.msg_version)
		return -EINVAL;

	mutex_lock(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(snp_dev)) {
		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
		mutex_unlock(&snp_cmd_mutex);
		return -ENOTTY;
	}

	switch (ioctl) {
	case SNP_GET_REPORT:
		ret = get_report(snp_dev, &input);
		break;
	case SNP_GET_DERIVED_KEY:
		ret = get_derived_key(snp_dev, &input);
		break;
	case SNP_GET_EXT_REPORT:
		/*
		 * As get_ext_report() may be called from the ioctl() path and a
		 * kernel internal path (configfs-tsm), decorate the passed
		 * buffers as user pointers.
		 */
		io.req_data = USER_SOCKPTR((void __user *)input.req_data);
		io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
		ret = get_ext_report(snp_dev, &input, &io);
		break;
	default:
		break;
	}

	mutex_unlock(&snp_cmd_mutex);

	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
		return -EFAULT;

	return ret;
}
703 
/*
 * Return previously shared pages to the page allocator, re-encrypting them
 * first. NULL is a no-op.
 */
static void free_shared_pages(void *buf, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	int rc;

	if (!buf)
		return;

	rc = set_memory_encrypted((unsigned long)buf, npages);
	if (!rc) {
		__free_pages(virt_to_page(buf), get_order(sz));
		return;
	}

	/*
	 * Pages whose encryption mask could not be restored must never reach
	 * the allocator again — deliberately leak them.
	 */
	WARN_ONCE(rc, "failed to restore encryption mask (leak it)\n");
}
720 
/*
 * Allocate page-aligned memory and clear its encryption mask so the host
 * can read/write it. Returns the virtual address, or NULL on failure.
 */
static void *alloc_shared_pages(struct device *dev, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	unsigned int order = get_order(sz);
	struct page *page;
	void *addr;
	int rc;

	page = alloc_pages(GFP_KERNEL_ACCOUNT, order);
	if (!page)
		return NULL;

	addr = page_address(page);
	rc = set_memory_decrypted((unsigned long)addr, npages);
	if (rc) {
		dev_err(dev, "failed to mark page shared, ret=%d\n", rc);
		__free_pages(page, order);
		return NULL;
	}

	return addr;
}
740 
741 static const struct file_operations snp_guest_fops = {
742 	.owner	= THIS_MODULE,
743 	.unlocked_ioctl = snp_guest_ioctl,
744 };
745 
get_vmpck(int id,struct snp_secrets_page * secrets,u32 ** seqno)746 static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
747 {
748 	u8 *key = NULL;
749 
750 	switch (id) {
751 	case 0:
752 		*seqno = &secrets->os_area.msg_seqno_0;
753 		key = secrets->vmpck0;
754 		break;
755 	case 1:
756 		*seqno = &secrets->os_area.msg_seqno_1;
757 		key = secrets->vmpck1;
758 		break;
759 	case 2:
760 		*seqno = &secrets->os_area.msg_seqno_2;
761 		key = secrets->vmpck2;
762 		break;
763 	case 3:
764 		*seqno = &secrets->os_area.msg_seqno_3;
765 		key = secrets->vmpck3;
766 		break;
767 	default:
768 		break;
769 	}
770 
771 	return key;
772 }
773 
/* Header the firmware places in front of the report payload. */
struct snp_msg_report_resp_hdr {
	u32 status;		/* firmware status code (SEV_RET_*) */
	u32 report_size;	/* size of the report that follows */
	u8 rsvd[24];
};

/* One entry of the GUID-keyed certificate table returned by the host. */
struct snp_msg_cert_entry {
	guid_t guid;
	u32 offset;		/* byte offset of the cert within the blob */
	u32 length;		/* byte length of the cert */
};
785 
/*
 * configfs-tsm report_new callback: build an extended report request from
 * the TSM descriptor, issue it via get_ext_report() using kernel sockptrs,
 * then split the response into report->outblob (the attestation report)
 * and report->auxblob (the certificate table), both allocated here and
 * ownership-transferred to the TSM core via no_free_ptr().
 */
static int sev_report_new(struct tsm_report *report, void *data)
{
	struct snp_msg_cert_entry *cert_table;
	struct tsm_desc *desc = &report->desc;
	struct snp_guest_dev *snp_dev = data;
	struct snp_msg_report_resp_hdr hdr;
	const u32 report_size = SZ_4K;
	const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
	u32 certs_size, i, size = report_size + ext_size;
	int ret;

	if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
		return -EINVAL;

	/* One buffer holds both the report area and the trailing cert area. */
	void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	guard(mutex)(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(snp_dev)) {
		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
		return -ENOTTY;
	}

	cert_table = buf + report_size;
	struct snp_ext_report_req ext_req = {
		.data = { .vmpl = desc->privlevel },
		.certs_address = (__u64)cert_table,
		.certs_len = ext_size,
	};
	memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);

	struct snp_guest_request_ioctl input = {
		.msg_version = 1,
		.req_data = (__u64)&ext_req,
		.resp_data = (__u64)buf,
		.exitinfo2 = 0xff,
	};
	struct snp_req_resp io = {
		.req_data = KERNEL_SOCKPTR(&ext_req),
		.resp_data = KERNEL_SOCKPTR(buf),
	};

	ret = get_ext_report(snp_dev, &input, &io);
	if (ret)
		return ret;

	/* Validate the firmware response header before trusting its sizes. */
	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.status == SEV_RET_INVALID_PARAM)
		return -EINVAL;
	if (hdr.status == SEV_RET_INVALID_KEY)
		return -EINVAL;
	if (hdr.status)
		return -ENXIO;
	if ((hdr.report_size + sizeof(hdr)) > report_size)
		return -ENOMEM;

	void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
	report->outblob = no_free_ptr(rbuf);
	report->outblob_len = hdr.report_size;

	/* The table ends at the first all-zero entry; size is the max extent. */
	certs_size = 0;
	for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
		struct snp_msg_cert_entry *ent = &cert_table[i];

		if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
			break;
		certs_size = max(certs_size, ent->offset + ent->length);
	}

	/* Suspicious that the response populated entries without populating size */
	if (!certs_size && i)
		dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");

	/* No certs to report */
	if (!certs_size)
		return 0;

	/* Suspicious that the certificate blob size contract was violated
	 */
	if (certs_size > ext_size) {
		dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
		certs_size = ext_size;
	}

	void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	memcpy(cbuf, cert_table, certs_size);
	report->auxblob = no_free_ptr(cbuf);
	report->auxblob_len = certs_size;

	return 0;
}
887 
888 static const struct tsm_ops sev_tsm_ops = {
889 	.name = KBUILD_MODNAME,
890 	.report_new = sev_report_new,
891 };
892 
/* devm action: drop the TSM provider registration on device teardown. */
static void unregister_sev_tsm(void *data)
{
	/* @data is unused; the ops table is the global sev_tsm_ops. */
	tsm_unregister(&sev_tsm_ops);
}
897 
sev_guest_probe(struct platform_device * pdev)898 static int __init sev_guest_probe(struct platform_device *pdev)
899 {
900 	struct sev_guest_platform_data *data;
901 	struct snp_secrets_page *secrets;
902 	struct device *dev = &pdev->dev;
903 	struct snp_guest_dev *snp_dev;
904 	struct miscdevice *misc;
905 	void __iomem *mapping;
906 	int ret;
907 
908 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
909 		return -ENODEV;
910 
911 	if (!dev->platform_data)
912 		return -ENODEV;
913 
914 	data = (struct sev_guest_platform_data *)dev->platform_data;
915 	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
916 	if (!mapping)
917 		return -ENODEV;
918 
919 	secrets = (__force void *)mapping;
920 
921 	ret = -ENOMEM;
922 	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
923 	if (!snp_dev)
924 		goto e_unmap;
925 
926 	ret = -EINVAL;
927 	snp_dev->vmpck = get_vmpck(vmpck_id, secrets, &snp_dev->os_area_msg_seqno);
928 	if (!snp_dev->vmpck) {
929 		dev_err(dev, "invalid vmpck id %d\n", vmpck_id);
930 		goto e_unmap;
931 	}
932 
933 	/* Verify that VMPCK is not zero. */
934 	if (is_vmpck_empty(snp_dev)) {
935 		dev_err(dev, "vmpck id %d is null\n", vmpck_id);
936 		goto e_unmap;
937 	}
938 
939 	platform_set_drvdata(pdev, snp_dev);
940 	snp_dev->dev = dev;
941 	snp_dev->secrets = secrets;
942 
943 	/* Allocate the shared page used for the request and response message. */
944 	snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
945 	if (!snp_dev->request)
946 		goto e_unmap;
947 
948 	snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
949 	if (!snp_dev->response)
950 		goto e_free_request;
951 
952 	snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
953 	if (!snp_dev->certs_data)
954 		goto e_free_response;
955 
956 	ret = -EIO;
957 	snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN);
958 	if (!snp_dev->crypto)
959 		goto e_free_cert_data;
960 
961 	misc = &snp_dev->misc;
962 	misc->minor = MISC_DYNAMIC_MINOR;
963 	misc->name = DEVICE_NAME;
964 	misc->fops = &snp_guest_fops;
965 
966 	/* initial the input address for guest request */
967 	snp_dev->input.req_gpa = __pa(snp_dev->request);
968 	snp_dev->input.resp_gpa = __pa(snp_dev->response);
969 	snp_dev->input.data_gpa = __pa(snp_dev->certs_data);
970 
971 	ret = tsm_register(&sev_tsm_ops, snp_dev, &tsm_report_extra_type);
972 	if (ret)
973 		goto e_free_cert_data;
974 
975 	ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
976 	if (ret)
977 		goto e_free_cert_data;
978 
979 	ret =  misc_register(misc);
980 	if (ret)
981 		goto e_free_cert_data;
982 
983 	dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id);
984 	return 0;
985 
986 e_free_cert_data:
987 	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
988 e_free_response:
989 	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
990 e_free_request:
991 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
992 e_unmap:
993 	iounmap(mapping);
994 	return ret;
995 }
996 
/*
 * Module exit teardown: release the shared buffers and crypto state, then
 * remove the misc device.
 *
 * NOTE(review): the misc device is deregistered *after* the buffers it
 * uses are freed — confirm no in-flight ioctl can race this on unbind
 * (module refcounting may already prevent it; verify).
 */
static void __exit sev_guest_remove(struct platform_device *pdev)
{
	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);

	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
	deinit_crypto(snp_dev->crypto);
	misc_deregister(&snp_dev->misc);
}
1007 
1008 /*
1009  * This driver is meant to be a common SEV guest interface driver and to
1010  * support any SEV guest API. As such, even though it has been introduced
1011  * with the SEV-SNP support, it is named "sev-guest".
1012  */
1013 static struct platform_driver sev_guest_driver = {
1014 	.remove_new	= __exit_p(sev_guest_remove),
1015 	.driver		= {
1016 		.name = "sev-guest",
1017 	},
1018 };
1019 
1020 module_platform_driver_probe(sev_guest_driver, sev_guest_probe);
1021 
1022 MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
1023 MODULE_LICENSE("GPL");
1024 MODULE_VERSION("1.0.0");
1025 MODULE_DESCRIPTION("AMD SEV Guest Driver");
1026 MODULE_ALIAS("platform:sev-guest");
1027