xref: /linux/drivers/virt/coco/sev-guest/sev-guest.c (revision 96500610)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Secure Encrypted Virtualization (SEV) guest driver interface
4  *
5  * Copyright (C) 2021 Advanced Micro Devices, Inc.
6  *
7  * Author: Brijesh Singh <brijesh.singh@amd.com>
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/mutex.h>
14 #include <linux/io.h>
15 #include <linux/platform_device.h>
16 #include <linux/miscdevice.h>
17 #include <linux/set_memory.h>
18 #include <linux/fs.h>
19 #include <crypto/aead.h>
20 #include <linux/scatterlist.h>
21 #include <linux/psp-sev.h>
22 #include <uapi/linux/sev-guest.h>
23 #include <uapi/linux/psp-sev.h>
24 
25 #include <asm/svm.h>
26 #include <asm/sev.h>
27 
28 #include "sev-guest.h"
29 
#define DEVICE_NAME	"sev-guest"
/* AAD length in bytes; see the AEAD layout diagram in enc_dec_message(). */
#define AAD_LEN		48
#define MSG_HDR_VER	1

/* Bound and pacing for retrying throttled (-EAGAIN) guest requests. */
#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
#define SNP_REQ_RETRY_DELAY		(2*HZ)
36 
/* AEAD (AES-GCM) state used to encrypt/decrypt guest messages. */
struct snp_guest_crypto {
	struct crypto_aead *tfm;	/* "gcm(aes)" transform keyed with the VMPCK */
	u8 *iv, *authtag;		/* scratch IV and authentication-tag buffers */
	int iv_len, a_len;		/* sizes of the iv and authtag buffers */
};
42 
/* Per-device state for the SEV-SNP guest message interface. */
struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;

	/* shared (decrypted) buffer used to exchange certificate data */
	void *certs_data;
	struct snp_guest_crypto *crypto;
	/* request and response are in unencrypted memory */
	struct snp_guest_msg *request, *response;

	/*
	 * Avoid information leakage by double-buffering shared messages
	 * in fields that are in regular encrypted memory.
	 */
	struct snp_guest_msg secret_request, secret_response;

	struct snp_secrets_page_layout *layout;
	struct snp_req_data input;	/* GPAs handed to the firmware */
	u32 *os_area_msg_seqno;		/* sequence counter in the secrets page */
	u8 *vmpck;			/* active VMPCK; NULL once disabled */
};
63 
/* VMPCK selector (0-3); defaults to 0, read-only module parameter. */
static u32 vmpck_id;
module_param(vmpck_id, uint, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");

/* Mutex to serialize the shared buffer access and command handling. */
static DEFINE_MUTEX(snp_cmd_mutex);
70 
71 static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
72 {
73 	char zero_key[VMPCK_KEY_LEN] = {0};
74 
75 	if (snp_dev->vmpck)
76 		return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN);
77 
78 	return true;
79 }
80 
/*
 * If an error is received from the host or AMD Secure Processor (ASP) there
 * are two options. Either retry the exact same encrypted request or discontinue
 * using the VMPCK.
 *
 * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
 * encrypt the requests. The IV for this scheme is the sequence number. GCM
 * cannot tolerate IV reuse.
 *
 * The ASP FW v1.51 only increments the sequence numbers on a successful
 * guest<->ASP back and forth and only accepts messages at its exact sequence
 * number.
 *
 * So if the sequence number were to be reused the encryption scheme is
 * vulnerable. If the sequence number were incremented for a fresh IV the ASP
 * will reject the request.
 */
static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
{
	dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
		  vmpck_id);
	/* Wipe the key material so it cannot be recovered from memory. */
	memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
	/* A NULL vmpck makes is_vmpck_empty() fail all further commands. */
	snp_dev->vmpck = NULL;
}
105 
106 static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
107 {
108 	u64 count;
109 
110 	lockdep_assert_held(&snp_cmd_mutex);
111 
112 	/* Read the current message sequence counter from secrets pages */
113 	count = *snp_dev->os_area_msg_seqno;
114 
115 	return count + 1;
116 }
117 
118 /* Return a non-zero on success */
119 static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
120 {
121 	u64 count = __snp_get_msg_seqno(snp_dev);
122 
123 	/*
124 	 * The message sequence counter for the SNP guest request is a  64-bit
125 	 * value but the version 2 of GHCB specification defines a 32-bit storage
126 	 * for it. If the counter exceeds the 32-bit value then return zero.
127 	 * The caller should check the return value, but if the caller happens to
128 	 * not check the value and use it, then the firmware treats zero as an
129 	 * invalid number and will fail the  message request.
130 	 */
131 	if (count >= UINT_MAX) {
132 		dev_err(snp_dev->dev, "request message sequence counter overflow\n");
133 		return 0;
134 	}
135 
136 	return count;
137 }
138 
/* Advance the stored sequence counter after a request round trip. */
static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
{
	/*
	 * The counter is also incremented by the PSP, so increment it by 2
	 * and save in secrets page.
	 */
	*snp_dev->os_area_msg_seqno += 2;
}
147 
148 static inline struct snp_guest_dev *to_snp_dev(struct file *file)
149 {
150 	struct miscdevice *dev = file->private_data;
151 
152 	return container_of(dev, struct snp_guest_dev, misc);
153 }
154 
/*
 * Allocate and key the AES-GCM transform used to protect guest messages,
 * along with scratch IV/authtag buffers sized for it.
 *
 * Returns the new context, or NULL on any failure (all partial
 * allocations are unwound).
 */
static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen)
{
	struct snp_guest_crypto *crypto;

	crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT);
	if (!crypto)
		return NULL;

	crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(crypto->tfm))
		goto e_free;

	if (crypto_aead_setkey(crypto->tfm, key, keylen))
		goto e_free_crypto;

	crypto->iv_len = crypto_aead_ivsize(crypto->tfm);
	crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT);
	if (!crypto->iv)
		goto e_free_crypto;

	/* Cap the authentication tag at what the message format can carry. */
	if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) {
		if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) {
			dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN);
			goto e_free_iv;
		}
	}

	crypto->a_len = crypto_aead_authsize(crypto->tfm);
	crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT);
	if (!crypto->authtag)
		goto e_free_iv;

	return crypto;

e_free_iv:
	kfree(crypto->iv);
e_free_crypto:
	crypto_free_aead(crypto->tfm);
e_free:
	kfree(crypto);

	return NULL;
}
198 
199 static void deinit_crypto(struct snp_guest_crypto *crypto)
200 {
201 	crypto_free_aead(crypto->tfm);
202 	kfree(crypto->iv);
203 	kfree(crypto->authtag);
204 	kfree(crypto);
205 }
206 
/*
 * Run the AEAD transform over one guest message: the header from ->algo
 * onward is authenticated as AAD, hdr->msg_sz payload bytes are
 * transformed between @src_buf and @dst_buf, and the tag lives in
 * hdr->authtag.  @enc selects encryption vs. decryption.  The IV must
 * already be set up in crypto->iv by the caller.
 */
static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg,
			   u8 *src_buf, u8 *dst_buf, size_t len, bool enc)
{
	struct snp_guest_msg_hdr *hdr = &msg->hdr;
	struct scatterlist src[3], dst[3];
	DECLARE_CRYPTO_WAIT(wait);
	struct aead_request *req;
	int ret;

	req = aead_request_alloc(crypto->tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/*
	 * AEAD memory operations:
	 * +------ AAD -------+------- DATA -----+---- AUTHTAG----+
	 * |  msg header      |  plaintext       |  hdr->authtag  |
	 * | bytes 30h - 5Fh  |    or            |                |
	 * |                  |   cipher         |                |
	 * +------------------+------------------+----------------+
	 */
	sg_init_table(src, 3);
	sg_set_buf(&src[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&src[1], src_buf, hdr->msg_sz);
	sg_set_buf(&src[2], hdr->authtag, crypto->a_len);

	sg_init_table(dst, 3);
	sg_set_buf(&dst[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&dst[1], dst_buf, hdr->msg_sz);
	sg_set_buf(&dst[2], hdr->authtag, crypto->a_len);

	aead_request_set_ad(req, AAD_LEN);
	aead_request_set_tfm(req, crypto->tfm);
	/* Run synchronously: crypto_wait_req() blocks until completion. */
	aead_request_set_callback(req, 0, crypto_req_done, &wait);

	aead_request_set_crypt(req, src, dst, len, crypto->iv);
	ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait);

	aead_request_free(req);
	return ret;
}
248 
/*
 * Encrypt @len bytes of @plaintext into msg->payload.  The GCM IV is the
 * message sequence number, zero-padded to the transform's IV length.
 */
static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
			 void *plaintext, size_t len)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;

	memset(crypto->iv, 0, crypto->iv_len);
	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));

	return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true);
}
260 
/*
 * Decrypt (and authenticate) msg->payload into @plaintext.  The IV is
 * rebuilt from the sequence number carried in the response header.
 */
static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
		       void *plaintext, size_t len)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;

	/* Build IV with response buffer sequence number */
	memset(crypto->iv, 0, crypto->iv_len);
	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));

	return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false);
}
273 
274 static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
275 {
276 	struct snp_guest_crypto *crypto = snp_dev->crypto;
277 	struct snp_guest_msg *resp = &snp_dev->secret_response;
278 	struct snp_guest_msg *req = &snp_dev->secret_request;
279 	struct snp_guest_msg_hdr *req_hdr = &req->hdr;
280 	struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;
281 
282 	dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
283 		resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);
284 
285 	/* Copy response from shared memory to encrypted memory. */
286 	memcpy(resp, snp_dev->response, sizeof(*resp));
287 
288 	/* Verify that the sequence counter is incremented by 1 */
289 	if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
290 		return -EBADMSG;
291 
292 	/* Verify response message type and version number. */
293 	if (resp_hdr->msg_type != (req_hdr->msg_type + 1) ||
294 	    resp_hdr->msg_version != req_hdr->msg_version)
295 		return -EBADMSG;
296 
297 	/*
298 	 * If the message size is greater than our buffer length then return
299 	 * an error.
300 	 */
301 	if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz))
302 		return -EBADMSG;
303 
304 	/* Decrypt the payload */
305 	return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len);
306 }
307 
/*
 * Build the header of the in-memory (encrypted) request message and
 * encrypt the caller's payload into it.  Returns -ENOSR when the
 * sequence number is zero, since a zero/reused sequence number must
 * never reach the firmware.
 */
static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
			void *payload, size_t sz)
{
	struct snp_guest_msg *req = &snp_dev->secret_request;
	struct snp_guest_msg_hdr *hdr = &req->hdr;

	memset(req, 0, sizeof(*req));

	hdr->algo = SNP_AEAD_AES_256_GCM;
	hdr->hdr_version = MSG_HDR_VER;
	hdr->hdr_sz = sizeof(*hdr);
	hdr->msg_type = type;
	hdr->msg_version = version;
	hdr->msg_seqno = seqno;
	hdr->msg_vmpck = vmpck_id;
	hdr->msg_sz = sz;

	/* Verify the sequence number is non-zero */
	if (!hdr->msg_seqno)
		return -ENOSR;

	dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n",
		hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);

	return __enc_payload(snp_dev, req, payload, sz);
}
334 
/*
 * Issue the already-encrypted request in snp_dev->input to the firmware,
 * retrying on throttling and downgrading an extended request whose
 * certificate buffer was too small, so that the sequence number always
 * advances exactly once.  On return, *fw_err (if non-NULL) holds the
 * firmware error code (possibly overridden to SNP_GUEST_REQ_INVALID_LEN).
 */
static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
{
	unsigned long err = 0xff, override_err = 0;
	unsigned long req_start = jiffies;
	unsigned int override_npages = 0;
	int rc;

retry_request:
	/*
	 * Call firmware to process the request. In this function the encrypted
	 * message enters shared memory with the host. So after this call the
	 * sequence number must be incremented or the VMPCK must be deleted to
	 * prevent reuse of the IV.
	 */
	rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
	switch (rc) {
	case -ENOSPC:
		/*
		 * If the extended guest request fails due to having too
		 * small of a certificate data buffer, retry the same
		 * guest request without the extended data request in
		 * order to increment the sequence number and thus avoid
		 * IV reuse.
		 */
		override_npages = snp_dev->input.data_npages;
		exit_code	= SVM_VMGEXIT_GUEST_REQUEST;

		/*
		 * Override the error to inform callers the given extended
		 * request buffer size was too small and give the caller the
		 * required buffer size.
		 */
		override_err	= SNP_GUEST_REQ_INVALID_LEN;

		/*
		 * If this call to the firmware succeeds, the sequence number can
		 * be incremented allowing for continued use of the VMPCK. If
		 * there is an error reflected in the return value, this value
		 * is checked further down and the result will be the deletion
		 * of the VMPCK and the error code being propagated back to the
		 * user as an ioctl() return code.
		 */
		goto retry_request;

	/*
	 * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
	 * throttled. Retry in the driver to avoid returning and reusing the
	 * message sequence number on a different message.
	 */
	case -EAGAIN:
		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
			rc = -ETIMEDOUT;
			break;
		}
		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
		goto retry_request;
	}

	/*
	 * Increment the message sequence number. There is no harm in doing
	 * this now because decryption uses the value stored in the response
	 * structure and any failure will wipe the VMPCK, preventing further
	 * use anyway.
	 */
	snp_inc_msg_seqno(snp_dev);

	if (fw_err)
		*fw_err = override_err ?: err;

	/* Restore the caller's certificate page count after the downgrade. */
	if (override_npages)
		snp_dev->input.data_npages = override_npages;

	/*
	 * If an extended guest request was issued and the supplied certificate
	 * buffer was not large enough, a standard guest request was issued to
	 * prevent IV reuse. If the standard request was successful, return -EIO
	 * back to the caller as would have originally been returned.
	 */
	if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
		return -EIO;

	return rc;
}
418 
/*
 * Encrypt one request, submit it to the firmware and verify/decrypt the
 * response into @resp_buf.  On any failure other than a too-small
 * certificate buffer, the VMPCK is wiped to prevent IV reuse.
 *
 * NOTE(review): @fw_err is dereferenced unconditionally below even though
 * __handle_guest_request() treats it as optional.  All current callers
 * pass &arg->fw_err; confirm before adding a caller that passes NULL.
 */
static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
				u32 resp_sz, __u64 *fw_err)
{
	u64 seqno;
	int rc;

	/* Get message sequence and verify that its a non-zero */
	seqno = snp_get_msg_seqno(snp_dev);
	if (!seqno)
		return -EIO;

	/* Clear shared memory's response for the host to populate. */
	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));

	/* Encrypt the userspace provided payload in snp_dev->secret_request. */
	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
	if (rc)
		return rc;

	/*
	 * Write the fully encrypted request to the shared unencrypted
	 * request page.
	 */
	memcpy(snp_dev->request, &snp_dev->secret_request,
	       sizeof(snp_dev->secret_request));

	rc = __handle_guest_request(snp_dev, exit_code, fw_err);
	if (rc) {
		/* Too-small cert buffer is recoverable: keep the VMPCK. */
		if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
			return rc;

		dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
		snp_disable_vmpck(snp_dev);
		return rc;
	}

	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
	if (rc) {
		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
		snp_disable_vmpck(snp_dev);
		return rc;
	}

	return 0;
}
465 
/*
 * SNP_GET_REPORT ioctl handler: fetch an attestation report from the
 * firmware and copy it back to userspace.  Caller holds snp_cmd_mutex.
 */
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_report_resp *resp;
	struct snp_report_req req;
	int rc, resp_len;

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
		return -EFAULT;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version,
				  SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
				  resp_len, &arg->fw_err);
	if (rc)
		goto e_free;

	if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
		rc = -EFAULT;

e_free:
	kfree(resp);
	return rc;
}
504 
/*
 * SNP_GET_DERIVED_KEY ioctl handler: request a derived key from the
 * firmware.  Uses a stack buffer and explicitly scrubs the key material
 * before returning.  Caller holds snp_cmd_mutex.
 */
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_derived_key_resp resp = {0};
	struct snp_derived_key_req req;
	int rc, resp_len;
	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
	u8 buf[64 + 16];

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp.data) + crypto->a_len;
	if (sizeof(buf) < resp_len)
		return -ENOMEM;

	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
		return -EFAULT;

	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version,
				  SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len,
				  &arg->fw_err);
	if (rc)
		return rc;

	memcpy(resp.data, buf, sizeof(resp.data));
	if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp)))
		rc = -EFAULT;

	/* The response buffer contains the sensitive data, explicitly clear it. */
	memzero_explicit(buf, sizeof(buf));
	memzero_explicit(&resp, sizeof(resp));
	return rc;
}
546 
/*
 * SNP_GET_EXT_REPORT ioctl handler: like get_report() but additionally
 * retrieves the host-supplied certificate blob into the caller's buffer.
 * A zero certs_len/certs_address means the caller wants no certificates.
 * Caller holds snp_cmd_mutex.
 */
static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_ext_report_req req;
	struct snp_report_resp *resp;
	int ret, npages = 0, resp_len;

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
		return -EFAULT;

	/* userspace does not want certificate data */
	if (!req.certs_len || !req.certs_address)
		goto cmd;

	/* Certificate length must be page-aligned and within the FW limit. */
	if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
	    !IS_ALIGNED(req.certs_len, PAGE_SIZE))
		return -EINVAL;

	if (!access_ok((const void __user *)req.certs_address, req.certs_len))
		return -EFAULT;

	/*
	 * Initialize the intermediate buffer with all zeros. This buffer
	 * is used in the guest request message to get the certs blob from
	 * the host. If host does not supply any certs in it, then copy
	 * zeros to indicate that certificate data was not provided.
	 */
	memset(snp_dev->certs_data, 0, req.certs_len);
	npages = req.certs_len >> PAGE_SHIFT;
cmd:
	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	snp_dev->input.data_npages = npages;
	ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg->msg_version,
				   SNP_MSG_REPORT_REQ, &req.data,
				   sizeof(req.data), resp->data, resp_len, &arg->fw_err);

	/* If certs length is invalid then copy the returned length */
	if (arg->fw_err == SNP_GUEST_REQ_INVALID_LEN) {
		req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;

		if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
			ret = -EFAULT;
	}

	if (ret)
		goto e_free;

	if (npages &&
	    copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
			 req.certs_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
		ret = -EFAULT;

e_free:
	kfree(resp);
	return ret;
}
622 
/*
 * Top-level ioctl dispatcher.  Serializes all commands via snp_cmd_mutex,
 * refuses to run once the VMPCK has been disabled, and copies the updated
 * request structure (including fw_err) back to userspace.
 */
static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	struct snp_guest_dev *snp_dev = to_snp_dev(file);
	void __user *argp = (void __user *)arg;
	struct snp_guest_request_ioctl input;
	int ret = -ENOTTY;

	if (copy_from_user(&input, argp, sizeof(input)))
		return -EFAULT;

	/* Sentinel so userspace can distinguish "no firmware error". */
	input.fw_err = 0xff;

	/* Message version must be non-zero */
	if (!input.msg_version)
		return -EINVAL;

	mutex_lock(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(snp_dev)) {
		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
		mutex_unlock(&snp_cmd_mutex);
		return -ENOTTY;
	}

	switch (ioctl) {
	case SNP_GET_REPORT:
		ret = get_report(snp_dev, &input);
		break;
	case SNP_GET_DERIVED_KEY:
		ret = get_derived_key(snp_dev, &input);
		break;
	case SNP_GET_EXT_REPORT:
		ret = get_ext_report(snp_dev, &input);
		break;
	default:
		break;
	}

	mutex_unlock(&snp_cmd_mutex);

	/* Propagate the firmware error code back to the caller. */
	if (input.fw_err && copy_to_user(argp, &input, sizeof(input)))
		return -EFAULT;

	return ret;
}
669 
670 static void free_shared_pages(void *buf, size_t sz)
671 {
672 	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
673 	int ret;
674 
675 	if (!buf)
676 		return;
677 
678 	ret = set_memory_encrypted((unsigned long)buf, npages);
679 	if (ret) {
680 		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
681 		return;
682 	}
683 
684 	__free_pages(virt_to_page(buf), get_order(sz));
685 }
686 
687 static void *alloc_shared_pages(struct device *dev, size_t sz)
688 {
689 	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
690 	struct page *page;
691 	int ret;
692 
693 	page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
694 	if (!page)
695 		return NULL;
696 
697 	ret = set_memory_decrypted((unsigned long)page_address(page), npages);
698 	if (ret) {
699 		dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
700 		__free_pages(page, get_order(sz));
701 		return NULL;
702 	}
703 
704 	return page_address(page);
705 }
706 
/* Character-device ops: all commands are issued through ioctl(). */
static const struct file_operations snp_guest_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl = snp_guest_ioctl,
};
711 
712 static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno)
713 {
714 	u8 *key = NULL;
715 
716 	switch (id) {
717 	case 0:
718 		*seqno = &layout->os_area.msg_seqno_0;
719 		key = layout->vmpck0;
720 		break;
721 	case 1:
722 		*seqno = &layout->os_area.msg_seqno_1;
723 		key = layout->vmpck1;
724 		break;
725 	case 2:
726 		*seqno = &layout->os_area.msg_seqno_2;
727 		key = layout->vmpck2;
728 		break;
729 	case 3:
730 		*seqno = &layout->os_area.msg_seqno_3;
731 		key = layout->vmpck3;
732 		break;
733 	default:
734 		break;
735 	}
736 
737 	return key;
738 }
739 
740 static int __init sev_guest_probe(struct platform_device *pdev)
741 {
742 	struct snp_secrets_page_layout *layout;
743 	struct sev_guest_platform_data *data;
744 	struct device *dev = &pdev->dev;
745 	struct snp_guest_dev *snp_dev;
746 	struct miscdevice *misc;
747 	void __iomem *mapping;
748 	int ret;
749 
750 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
751 		return -ENODEV;
752 
753 	if (!dev->platform_data)
754 		return -ENODEV;
755 
756 	data = (struct sev_guest_platform_data *)dev->platform_data;
757 	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
758 	if (!mapping)
759 		return -ENODEV;
760 
761 	layout = (__force void *)mapping;
762 
763 	ret = -ENOMEM;
764 	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
765 	if (!snp_dev)
766 		goto e_unmap;
767 
768 	ret = -EINVAL;
769 	snp_dev->vmpck = get_vmpck(vmpck_id, layout, &snp_dev->os_area_msg_seqno);
770 	if (!snp_dev->vmpck) {
771 		dev_err(dev, "invalid vmpck id %d\n", vmpck_id);
772 		goto e_unmap;
773 	}
774 
775 	/* Verify that VMPCK is not zero. */
776 	if (is_vmpck_empty(snp_dev)) {
777 		dev_err(dev, "vmpck id %d is null\n", vmpck_id);
778 		goto e_unmap;
779 	}
780 
781 	platform_set_drvdata(pdev, snp_dev);
782 	snp_dev->dev = dev;
783 	snp_dev->layout = layout;
784 
785 	/* Allocate the shared page used for the request and response message. */
786 	snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
787 	if (!snp_dev->request)
788 		goto e_unmap;
789 
790 	snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
791 	if (!snp_dev->response)
792 		goto e_free_request;
793 
794 	snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
795 	if (!snp_dev->certs_data)
796 		goto e_free_response;
797 
798 	ret = -EIO;
799 	snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN);
800 	if (!snp_dev->crypto)
801 		goto e_free_cert_data;
802 
803 	misc = &snp_dev->misc;
804 	misc->minor = MISC_DYNAMIC_MINOR;
805 	misc->name = DEVICE_NAME;
806 	misc->fops = &snp_guest_fops;
807 
808 	/* initial the input address for guest request */
809 	snp_dev->input.req_gpa = __pa(snp_dev->request);
810 	snp_dev->input.resp_gpa = __pa(snp_dev->response);
811 	snp_dev->input.data_gpa = __pa(snp_dev->certs_data);
812 
813 	ret =  misc_register(misc);
814 	if (ret)
815 		goto e_free_cert_data;
816 
817 	dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id);
818 	return 0;
819 
820 e_free_cert_data:
821 	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
822 e_free_response:
823 	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
824 e_free_request:
825 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
826 e_unmap:
827 	iounmap(mapping);
828 	return ret;
829 }
830 
831 static int __exit sev_guest_remove(struct platform_device *pdev)
832 {
833 	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
834 
835 	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
836 	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
837 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
838 	deinit_crypto(snp_dev->crypto);
839 	misc_deregister(&snp_dev->misc);
840 
841 	return 0;
842 }
843 
/*
 * This driver is meant to be a common SEV guest interface driver and to
 * support any SEV guest API. As such, even though it has been introduced
 * with the SEV-SNP support, it is named "sev-guest".
 */
static struct platform_driver sev_guest_driver = {
	.remove		= __exit_p(sev_guest_remove),
	.driver		= {
		.name = "sev-guest",
	},
};
855 
/* One-shot probe at module init; the device is not hot-pluggable. */
module_platform_driver_probe(sev_guest_driver, sev_guest_probe);

MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD SEV Guest Driver");
MODULE_ALIAS("platform:sev-guest");
863