// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) guest driver interface
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/set_memory.h>
#include <linux/fs.h>
#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/psp-sev.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>

#include <asm/svm.h>
#include <asm/sev.h>

#include "sev-guest.h"

#define DEVICE_NAME	"sev-guest"
#define AAD_LEN		48
#define MSG_HDR_VER	1

#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
#define SNP_REQ_RETRY_DELAY		(2*HZ)
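
/*
 * A throttled (-EAGAIN) request is retried every SNP_REQ_RETRY_DELAY for up
 * to SNP_REQ_MAX_RETRY_DURATION before giving up with -ETIMEDOUT, so the
 * same message, and thus its sequence number, is reissued rather than the
 * sequence number being reused for a different message.
 */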

struct snp_guest_crypto {
	struct crypto_aead *tfm;
	u8 *iv, *authtag;
	int iv_len, a_len;
};

struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;

	void *certs_data;
	struct snp_guest_crypto *crypto;
	/* request and response are in unencrypted memory */
	struct snp_guest_msg *request, *response;

	/*
	 * Avoid information leakage by double-buffering shared messages
	 * in fields that are in regular encrypted memory.
	 */
	struct snp_guest_msg secret_request, secret_response;

	struct snp_secrets_page_layout *layout;
	struct snp_req_data input;
	u32 *os_area_msg_seqno;
	u8 *vmpck;
};

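/*
 * SEV-SNP provisions four VMPCKs (0-3) in the guest's secrets page;
 * vmpck_id selects which key, together with its matching OS-area message
 * sequence counter, the driver uses for all guest messages (see
 * get_vmpck()).
 */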
static u32 vmpck_id;
module_param(vmpck_id, uint, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");

/* Mutex to serialize the shared buffer access and command handling. */
static DEFINE_MUTEX(snp_cmd_mutex);

static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
{
	char zero_key[VMPCK_KEY_LEN] = {0};

	if (snp_dev->vmpck)
		return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN);

	return true;
}

/*
 * If an error is received from the host or AMD Secure Processor (ASP) there
 * are two options. Either retry the exact same encrypted request or discontinue
 * using the VMPCK.
 *
 * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
 * encrypt the requests. The IV for this scheme is the sequence number. GCM
 * cannot tolerate IV reuse.
 *
 * The ASP FW v1.51 only increments the sequence numbers on a successful
 * guest<->ASP back and forth and only accepts messages at its exact sequence
 * number.
 *
 * So if the sequence number were to be reused the encryption scheme is
 * vulnerable. If the sequence number were incremented for a fresh IV the ASP
 * will reject the request.
 */
static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
{
	dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
		  vmpck_id);
	memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
	snp_dev->vmpck = NULL;
}

static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
{
	u64 count;

	lockdep_assert_held(&snp_cmd_mutex);

	/* Read the current message sequence counter from the secrets page */
	count = *snp_dev->os_area_msg_seqno;

	return count + 1;
}

/* Return a non-zero value on success */
static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
{
	u64 count = __snp_get_msg_seqno(snp_dev);

	/*
	 * The message sequence counter for the SNP guest request is a 64-bit
	 * value, but version 2 of the GHCB specification defines only 32 bits
	 * of storage for it. If the counter exceeds the 32-bit value, return
	 * zero. The caller should check the return value, but if the caller
	 * happens to not check the value and use it, then the firmware treats
	 * zero as an invalid number and will fail the message request.
	 */
	if (count >= UINT_MAX) {
		dev_err(snp_dev->dev, "request message sequence counter overflow\n");
		return 0;
	}

	return count;
}

static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
{
	/*
	 * The counter is also incremented by the PSP, so increment it by 2
	 * and save it in the secrets page.
	 */
	*snp_dev->os_area_msg_seqno += 2;
}

static inline struct snp_guest_dev *to_snp_dev(struct file *file)
{
	struct miscdevice *dev = file->private_data;

	return container_of(dev, struct snp_guest_dev, misc);
}

static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen)
{
	struct snp_guest_crypto *crypto;

	crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT);
	if (!crypto)
		return NULL;

	crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(crypto->tfm))
		goto e_free;

	if (crypto_aead_setkey(crypto->tfm, key, keylen))
		goto e_free_crypto;

	crypto->iv_len = crypto_aead_ivsize(crypto->tfm);
	crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT);
	if (!crypto->iv)
		goto e_free_crypto;

	if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) {
		if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) {
			dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN);
			goto e_free_iv;
		}
	}

	crypto->a_len = crypto_aead_authsize(crypto->tfm);
	crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT);
	if (!crypto->authtag)
		goto e_free_iv;

	return crypto;

e_free_iv:
	kfree(crypto->iv);
e_free_crypto:
	crypto_free_aead(crypto->tfm);
e_free:
	kfree(crypto);

	return NULL;
}

static void deinit_crypto(struct snp_guest_crypto *crypto)
{
	crypto_free_aead(crypto->tfm);
	kfree(crypto->iv);
	kfree(crypto->authtag);
	kfree(crypto);
}

static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg,
			   u8 *src_buf, u8 *dst_buf, size_t len, bool enc)
{
	struct snp_guest_msg_hdr *hdr = &msg->hdr;
	struct scatterlist src[3], dst[3];
	DECLARE_CRYPTO_WAIT(wait);
	struct aead_request *req;
	int ret;

	req = aead_request_alloc(crypto->tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/*
	 * AEAD memory operations:
	 * +------ AAD -------+------- DATA -----+---- AUTHTAG----+
	 * |  msg header      |  plaintext       |  hdr->authtag  |
	 * | bytes 30h - 5Fh  |    or            |                |
	 * |                  |   cipher         |                |
	 * +------------------+------------------+----------------+
	 */
	sg_init_table(src, 3);
	sg_set_buf(&src[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&src[1], src_buf, hdr->msg_sz);
	sg_set_buf(&src[2], hdr->authtag, crypto->a_len);

	sg_init_table(dst, 3);
	sg_set_buf(&dst[0], &hdr->algo, AAD_LEN);
	sg_set_buf(&dst[1], dst_buf, hdr->msg_sz);
	sg_set_buf(&dst[2], hdr->authtag, crypto->a_len);

	aead_request_set_ad(req, AAD_LEN);
	aead_request_set_tfm(req, crypto->tfm);
	aead_request_set_callback(req, 0, crypto_req_done, &wait);

	aead_request_set_crypt(req, src, dst, len, crypto->iv);
	ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait);

	aead_request_free(req);
	return ret;
}

static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
			 void *plaintext, size_t len)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;

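	/* Build the IV with the request sequence number, zero-padded to the AEAD IV size */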
	memset(crypto->iv, 0, crypto->iv_len);
	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));

	return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true);
}

static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
		       void *plaintext, size_t len)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;

	/* Build IV with response buffer sequence number */
	memset(crypto->iv, 0, crypto->iv_len);
	memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));

	return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false);
}

static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_guest_msg *resp = &snp_dev->secret_response;
	struct snp_guest_msg *req = &snp_dev->secret_request;
	struct snp_guest_msg_hdr *req_hdr = &req->hdr;
	struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;

	dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
		resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);

	/* Copy response from shared memory to encrypted memory. */
	memcpy(resp, snp_dev->response, sizeof(*resp));

	/* Verify that the sequence counter is incremented by 1 */
	if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
		return -EBADMSG;

	/* Verify response message type and version number. */
	if (resp_hdr->msg_type != (req_hdr->msg_type + 1) ||
	    resp_hdr->msg_version != req_hdr->msg_version)
		return -EBADMSG;

	/*
	 * If the message size is greater than our buffer length then return
	 * an error.
	 */
	if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz))
		return -EBADMSG;

	/* Decrypt the payload */
	return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len);
}

static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
		       void *payload, size_t sz)
{
	struct snp_guest_msg *req = &snp_dev->secret_request;
	struct snp_guest_msg_hdr *hdr = &req->hdr;

	memset(req, 0, sizeof(*req));

	hdr->algo = SNP_AEAD_AES_256_GCM;
	hdr->hdr_version = MSG_HDR_VER;
	hdr->hdr_sz = sizeof(*hdr);
	hdr->msg_type = type;
	hdr->msg_version = version;
	hdr->msg_seqno = seqno;
	hdr->msg_vmpck = vmpck_id;
	hdr->msg_sz = sz;

	/* Verify the sequence number is non-zero */
	if (!hdr->msg_seqno)
		return -ENOSR;

	dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n",
		hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);

	return __enc_payload(snp_dev, req, payload, sz);
}

static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
				  struct snp_guest_request_ioctl *rio)
{
	unsigned long req_start = jiffies;
	unsigned int override_npages = 0;
	u64 override_err = 0;
	int rc;

retry_request:
	/*
	 * Call firmware to process the request. In this function the encrypted
	 * message enters shared memory with the host. So after this call the
	 * sequence number must be incremented or the VMPCK must be deleted to
	 * prevent reuse of the IV.
	 */
	rc = snp_issue_guest_request(exit_code, &snp_dev->input, rio);
	switch (rc) {
	case -ENOSPC:
		/*
		 * If the extended guest request fails due to having too
		 * small of a certificate data buffer, retry the same
		 * guest request without the extended data request in
		 * order to increment the sequence number and thus avoid
		 * IV reuse.
		 */
		override_npages = snp_dev->input.data_npages;
		exit_code = SVM_VMGEXIT_GUEST_REQUEST;

		/*
		 * Override the error to inform callers the given extended
		 * request buffer size was too small and give the caller the
		 * required buffer size.
		 */
		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);

		/*
		 * If this call to the firmware succeeds, the sequence number can
		 * be incremented allowing for continued use of the VMPCK. If
		 * there is an error reflected in the return value, this value
		 * is checked further down and the result will be the deletion
		 * of the VMPCK and the error code being propagated back to the
		 * user as an ioctl() return code.
		 */
		goto retry_request;

	/*
	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
	 * throttled. Retry in the driver to avoid returning and reusing the
	 * message sequence number on a different message.
	 */
	case -EAGAIN:
		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
			rc = -ETIMEDOUT;
			break;
		}
		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
		goto retry_request;
	}

	/*
	 * Increment the message sequence number. There is no harm in doing
	 * this now because decryption uses the value stored in the response
	 * structure and any failure will wipe the VMPCK, preventing further
	 * use anyway.
	 */
	snp_inc_msg_seqno(snp_dev);

	if (override_err) {
		rio->exitinfo2 = override_err;

		/*
		 * If an extended guest request was issued and the supplied certificate
		 * buffer was not large enough, a standard guest request was issued to
		 * prevent IV reuse. If the standard request was successful, return -EIO
		 * back to the caller as would have originally been returned.
		 */
		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			rc = -EIO;
	}

	if (override_npages)
		snp_dev->input.data_npages = override_npages;

	return rc;
}

static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
				struct snp_guest_request_ioctl *rio, u8 type,
				void *req_buf, size_t req_sz, void *resp_buf,
				u32 resp_sz)
{
	u64 seqno;
	int rc;

	/* Get the message sequence counter and verify that it is non-zero */
	seqno = snp_get_msg_seqno(snp_dev);
	if (!seqno)
		return -EIO;

	/* Clear shared memory's response for the host to populate. */
	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));

	/* Encrypt the userspace provided payload in snp_dev->secret_request. */
	rc = enc_payload(snp_dev, seqno, rio->msg_version, type, req_buf, req_sz);
	if (rc)
		return rc;

	/*
	 * Write the fully encrypted request to the shared unencrypted
	 * request page.
	 */
	memcpy(snp_dev->request, &snp_dev->secret_request,
	       sizeof(snp_dev->secret_request));

	rc = __handle_guest_request(snp_dev, exit_code, rio);
	if (rc) {
		if (rc == -EIO &&
		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			return rc;

		dev_alert(snp_dev->dev,
			  "Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
			  rc, rio->exitinfo2);

		snp_disable_vmpck(snp_dev);
		return rc;
	}

	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
	if (rc) {
		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
		snp_disable_vmpck(snp_dev);
		return rc;
	}

	return 0;
}

static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_report_resp *resp;
	struct snp_report_req req;
	int rc, resp_len;

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
		return -EFAULT;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
				  SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
				  resp_len);
	if (rc)
		goto e_free;

	if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
		rc = -EFAULT;

e_free:
	kfree(resp);
	return rc;
}

static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_derived_key_resp resp = {0};
	struct snp_derived_key_req req;
	int rc, resp_len;
	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
	u8 buf[64 + 16];

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp.data) + crypto->a_len;
	if (sizeof(buf) < resp_len)
		return -ENOMEM;

	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
		return -EFAULT;

	rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
				  SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len);
	if (rc)
		return rc;

	memcpy(resp.data, buf, sizeof(resp.data));
	if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp)))
		rc = -EFAULT;

	/* The response buffer contains the sensitive data, explicitly clear it. */
	memzero_explicit(buf, sizeof(buf));
	memzero_explicit(&resp, sizeof(resp));
	return rc;
}

static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_guest_crypto *crypto = snp_dev->crypto;
	struct snp_ext_report_req req;
	struct snp_report_resp *resp;
	int ret, npages = 0, resp_len;

	lockdep_assert_held(&snp_cmd_mutex);

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
		return -EFAULT;

	/* userspace does not want certificate data */
	if (!req.certs_len || !req.certs_address)
		goto cmd;

	if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
	    !IS_ALIGNED(req.certs_len, PAGE_SIZE))
		return -EINVAL;

	if (!access_ok((const void __user *)req.certs_address, req.certs_len))
		return -EFAULT;

	/*
	 * Initialize the intermediate buffer with all zeros. This buffer
	 * is used in the guest request message to get the certs blob from
	 * the host. If the host does not supply any certs in it, then copy
	 * zeros to indicate that certificate data was not provided.
	 */
	memset(snp_dev->certs_data, 0, req.certs_len);
	npages = req.certs_len >> PAGE_SHIFT;
cmd:
	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(resp->data) + crypto->a_len;
	resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!resp)
		return -ENOMEM;

	snp_dev->input.data_npages = npages;
	ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
				   SNP_MSG_REPORT_REQ, &req.data,
				   sizeof(req.data), resp->data, resp_len);

	/* If certs length is invalid then copy the returned length */
	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
		req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;

		if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
			ret = -EFAULT;
	}

	if (ret)
		goto e_free;

	if (npages &&
	    copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
			 req.certs_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
		ret = -EFAULT;

e_free:
	kfree(resp);
	return ret;
}

static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	struct snp_guest_dev *snp_dev = to_snp_dev(file);
	void __user *argp = (void __user *)arg;
	struct snp_guest_request_ioctl input;
	int ret = -ENOTTY;

	if (copy_from_user(&input, argp, sizeof(input)))
		return -EFAULT;

	input.exitinfo2 = 0xff;

	/* Message version must be non-zero */
	if (!input.msg_version)
		return -EINVAL;

	mutex_lock(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(snp_dev)) {
		dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
		mutex_unlock(&snp_cmd_mutex);
		return -ENOTTY;
	}

	switch (ioctl) {
	case SNP_GET_REPORT:
		ret = get_report(snp_dev, &input);
		break;
	case SNP_GET_DERIVED_KEY:
		ret = get_derived_key(snp_dev, &input);
		break;
	case SNP_GET_EXT_REPORT:
		ret = get_ext_report(snp_dev, &input);
		break;
	default:
		break;
	}

	mutex_unlock(&snp_cmd_mutex);

	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
		return -EFAULT;

	return ret;
}

static void free_shared_pages(void *buf, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	int ret;

	if (!buf)
		return;

	ret = set_memory_encrypted((unsigned long)buf, npages);
	if (ret) {
		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
		return;
	}

	__free_pages(virt_to_page(buf), get_order(sz));
}

static void *alloc_shared_pages(struct device *dev, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	struct page *page;
	int ret;

	page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
	if (!page)
		return NULL;

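	/*
	 * Clear the encryption attribute so the pages are shared with the
	 * hypervisor; on SNP, set_memory_decrypted() also performs the
	 * required page state change for the covered range.
	 */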
	ret = set_memory_decrypted((unsigned long)page_address(page), npages);
	if (ret) {
		dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
		__free_pages(page, get_order(sz));
		return NULL;
	}

	return page_address(page);
}

static const struct file_operations snp_guest_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl	= snp_guest_ioctl,
};
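
/*
 * Illustrative userspace usage of the /dev/sev-guest ioctl interface (a
 * sketch based on the UAPI in <uapi/linux/sev-guest.h>, not part of this
 * driver):
 *
 *	struct snp_report_req req = {};
 *	struct snp_report_resp resp = {};
 *	struct snp_guest_request_ioctl input = {
 *		.msg_version = 1,
 *		.req_data = (__u64)(uintptr_t)&req,
 *		.resp_data = (__u64)(uintptr_t)&resp,
 *	};
 *	int fd = open("/dev/sev-guest", O_RDWR);
 *
 *	if (fd < 0 || ioctl(fd, SNP_GET_REPORT, &input) < 0)
 *		// on failure, input.exitinfo2 carries the FW/VMM error code
 */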

static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno)
{
	u8 *key = NULL;

	switch (id) {
	case 0:
		*seqno = &layout->os_area.msg_seqno_0;
		key = layout->vmpck0;
		break;
	case 1:
		*seqno = &layout->os_area.msg_seqno_1;
		key = layout->vmpck1;
		break;
	case 2:
		*seqno = &layout->os_area.msg_seqno_2;
		key = layout->vmpck2;
		break;
	case 3:
		*seqno = &layout->os_area.msg_seqno_3;
		key = layout->vmpck3;
		break;
	default:
		break;
	}

	return key;
}

static int __init sev_guest_probe(struct platform_device *pdev)
{
	struct snp_secrets_page_layout *layout;
	struct sev_guest_platform_data *data;
	struct device *dev = &pdev->dev;
	struct snp_guest_dev *snp_dev;
	struct miscdevice *misc;
	void __iomem *mapping;
	int ret;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	if (!dev->platform_data)
		return -ENODEV;

	data = (struct sev_guest_platform_data *)dev->platform_data;
	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
	if (!mapping)
		return -ENODEV;

	layout = (__force void *)mapping;

	ret = -ENOMEM;
	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
	if (!snp_dev)
		goto e_unmap;

	ret = -EINVAL;
	snp_dev->vmpck = get_vmpck(vmpck_id, layout, &snp_dev->os_area_msg_seqno);
	if (!snp_dev->vmpck) {
		dev_err(dev, "invalid vmpck id %d\n", vmpck_id);
		goto e_unmap;
	}

	/* Verify that VMPCK is not zero. */
	if (is_vmpck_empty(snp_dev)) {
		dev_err(dev, "vmpck id %d is null\n", vmpck_id);
		goto e_unmap;
	}

	platform_set_drvdata(pdev, snp_dev);
	snp_dev->dev = dev;
	snp_dev->layout = layout;

	/* Allocate the shared pages used for the request and response messages. */
	snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
	if (!snp_dev->request)
		goto e_unmap;

	snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
	if (!snp_dev->response)
		goto e_free_request;

	snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
	if (!snp_dev->certs_data)
		goto e_free_response;

	ret = -EIO;
	snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN);
	if (!snp_dev->crypto)
		goto e_free_cert_data;

	misc = &snp_dev->misc;
	misc->minor = MISC_DYNAMIC_MINOR;
	misc->name = DEVICE_NAME;
	misc->fops = &snp_guest_fops;

	/* Initialize the input addresses for the guest request */
	snp_dev->input.req_gpa = __pa(snp_dev->request);
	snp_dev->input.resp_gpa = __pa(snp_dev->response);
	snp_dev->input.data_gpa = __pa(snp_dev->certs_data);

	ret = misc_register(misc);
	if (ret)
		goto e_free_cert_data;

	dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id);
	return 0;

e_free_cert_data:
	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
e_free_response:
	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
e_free_request:
	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
e_unmap:
	iounmap(mapping);
	return ret;
}

static int __exit sev_guest_remove(struct platform_device *pdev)
{
	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);

	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
	free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
	deinit_crypto(snp_dev->crypto);
	misc_deregister(&snp_dev->misc);

	return 0;
}

/*
 * This driver is meant to be a common SEV guest interface driver and to
 * support any SEV guest API. As such, even though it has been introduced
 * with the SEV-SNP support, it is named "sev-guest".
 */
static struct platform_driver sev_guest_driver = {
	.remove		= __exit_p(sev_guest_remove),
	.driver		= {
		.name = "sev-guest",
	},
};

module_platform_driver_probe(sev_guest_driver, sev_guest_probe);

MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD SEV Guest Driver");
MODULE_ALIAS("platform:sev-guest");