// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/suspend.h>
#include <linux/sched/mm.h>
#include <asm/sgx.h>
#include "encl.h"
#include "encls.h"
#include "sgx.h"

static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
				   struct sgx_backing *backing);

#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
/*
 * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to
 * determine the page index associated with the first PCMD entry
 * within a PCMD page.
 */
#define PCMD_FIRST_MASK GENMASK(4, 0)

/**
 * reclaimer_writing_to_pcmd() - Query if any enclave page associated with
 *                               a PCMD page is in the process of being reclaimed.
 * @encl:        Enclave to which PCMD page belongs
 * @start_addr:  Address of enclave page using first entry within the PCMD page
 *
 * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is
 * stored. The PCMD data of a reclaimed enclave page contains enough
 * information for the processor to verify the page at the time
 * it is loaded back into the Enclave Page Cache (EPC).
 *
 * The backing storage to which enclave pages are reclaimed is laid out as
 * follows:
 * Encrypted enclave pages:SECS page:PCMD pages
 *
 * Each PCMD page contains the PCMD metadata of
 * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages.
 *
 * A PCMD page can only be truncated if it is (a) empty, and (b) not in the
 * process of getting data (and thus soon being non-empty). (b) is tested with
 * a check if an enclave page sharing the PCMD page is in the process of being
 * reclaimed.
 *
 * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it
 * intends to reclaim that enclave page - it means that the PCMD page
 * associated with that enclave page is about to get some data and thus
 * even if the PCMD page is empty, it should not be truncated.
 *
 * Context: Enclave mutex (&sgx_encl->lock) must be held.
 * Return: 1 if the reclaimer is about to write to the PCMD page
 *         0 if the reclaimer has no intention to write to the PCMD page
 */
static int reclaimer_writing_to_pcmd(struct sgx_encl *encl,
				     unsigned long start_addr)
{
	int reclaimed = 0;
	int i;

	/*
	 * PCMD_FIRST_MASK is based on number of PCMD entries within
	 * PCMD page being 32.
	 */
	BUILD_BUG_ON(PCMDS_PER_PAGE != 32);

	for (i = 0; i < PCMDS_PER_PAGE; i++) {
		struct sgx_encl_page *entry;
		unsigned long addr;

		addr = start_addr + i * PAGE_SIZE;

		/*
		 * Stop when reaching the SECS page - it does not
		 * have a page_array entry and its reclaim is
		 * started and completed with enclave mutex held so
		 * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED
		 * flag.
		 */
		if (addr == encl->base + encl->size)
			break;

		entry = xa_load(&encl->page_array, PFN_DOWN(addr));
		if (!entry)
			continue;

		/*
		 * VA page slot ID uses same bit as the flag so it is important
		 * to ensure that the page is not already in backing store.
		 */
		if (entry->epc_page &&
		    (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) {
			reclaimed = 1;
			break;
		}
	}

	return reclaimed;
}

/*
 * Calculate byte offset of a PCMD struct associated with an enclave page. PCMDs
 * follow right after the EPC data in the backing storage. In addition to the
 * visible enclave pages, there's one extra page slot for SECS, before PCMD
 * structs.
 */
static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl,
							    unsigned long page_index)
{
	pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs);

	return epc_end_off + page_index * sizeof(struct sgx_pcmd);
}

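/*
 * Note on the offset calculation above: with the x86 PAGE_SIZE of 4 KiB,
 * sizeof(struct sgx_pcmd) is 128 bytes and PCMDS_PER_PAGE is 32 (see the
 * BUILD_BUG_ON() in reclaimer_writing_to_pcmd()), so for example the PCMD
 * of page_index 3 lives at byte offset encl->size + 4096 + 3 * 128 within
 * the backing file.
 */
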
/*
 * Free a page from the backing storage at the given page index.
 */
static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index)
{
	struct inode *inode = file_inode(encl->backing);

	shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1);
}

/*
 * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
 * Pages" in the SDM.
 */
static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
			   struct sgx_epc_page *epc_page,
			   struct sgx_epc_page *secs_page)
{
	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
	struct sgx_encl *encl = encl_page->encl;
	pgoff_t page_index, page_pcmd_off;
	unsigned long pcmd_first_page;
	struct sgx_pageinfo pginfo;
	struct sgx_backing b;
	bool pcmd_page_empty;
	u8 *pcmd_page;
	int ret;

	if (secs_page)
		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
	else
		page_index = PFN_DOWN(encl->size);

	/*
	 * Address of enclave page using the first entry within the PCMD page.
	 */
	pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base;

	page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);

	ret = sgx_encl_lookup_backing(encl, page_index, &b);
	if (ret)
		return ret;

	pginfo.addr = encl_page->desc & PAGE_MASK;
	pginfo.contents = (unsigned long)kmap_atomic(b.contents);
	pcmd_page = kmap_atomic(b.pcmd);
	pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset;

	if (secs_page)
		pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
	else
		pginfo.secs = 0;

	ret = __eldu(&pginfo, sgx_get_epc_virt_addr(epc_page),
		     sgx_get_epc_virt_addr(encl_page->va_page->epc_page) + va_offset);
	if (ret) {
		if (encls_failed(ret))
			ENCLS_WARN(ret, "ELDU");

		ret = -EFAULT;
	}

	memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
	set_page_dirty(b.pcmd);

	/*
	 * The area for the PCMD in the page was zeroed above. Check if the
	 * whole page is now empty meaning that all PCMDs have been zeroed:
	 */
	pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);

	kunmap_atomic(pcmd_page);
	kunmap_atomic((void *)(unsigned long)pginfo.contents);

	get_page(b.pcmd);
	sgx_encl_put_backing(&b);

	sgx_encl_truncate_backing_page(encl, page_index);

	if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) {
		sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
		pcmd_page = kmap_atomic(b.pcmd);
		if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
			pr_warn("PCMD page not empty after truncate.\n");
		kunmap_atomic(pcmd_page);
	}

	put_page(b.pcmd);

	return ret;
}

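/*
 * Allocate a fresh EPC page and load the reclaimed contents of @encl_page
 * back into it with ELDU. On success the Version Array (VA) slot that held
 * the page's version counter while it was swapped out is released and the
 * page is resident in the EPC again.
 */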
static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
					  struct sgx_epc_page *secs_page)
{
	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_epc_page *epc_page;
	int ret;

	epc_page = sgx_alloc_epc_page(encl_page, false);
	if (IS_ERR(epc_page))
		return epc_page;

	ret = __sgx_encl_eldu(encl_page, epc_page, secs_page);
	if (ret) {
		sgx_encl_free_epc_page(epc_page);
		return ERR_PTR(ret);
	}

	sgx_free_va_slot(encl_page->va_page, va_offset);
	list_move(&encl_page->va_page->list, &encl->va_pages);
	encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK;
	encl_page->epc_page = epc_page;

	return epc_page;
}

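/*
 * Common tail of the page loaders below: return @entry as-is if it is
 * already resident (or -EBUSY if the reclaimer owns it), otherwise fault
 * the SECS page back in first if needed and then ELDU the page itself.
 */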
static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
						  struct sgx_encl_page *entry)
{
	struct sgx_epc_page *epc_page;

	/* Entry successfully located. */
	if (entry->epc_page) {
		if (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)
			return ERR_PTR(-EBUSY);

		return entry;
	}

	if (!(encl->secs.epc_page)) {
		epc_page = sgx_encl_eldu(&encl->secs, NULL);
		if (IS_ERR(epc_page))
			return ERR_CAST(epc_page);
	}

	epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
	if (IS_ERR(epc_page))
		return ERR_CAST(epc_page);

	encl->secs_child_cnt++;
	sgx_mark_page_reclaimable(entry->epc_page);

	return entry;
}

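/*
 * Look up and load an enclave page, but only if the VMA permissions do not
 * exceed the build time permissions of the page.
 */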
static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
						       unsigned long addr,
						       unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
	struct sgx_encl_page *entry;

	entry = xa_load(&encl->page_array, PFN_DOWN(addr));
	if (!entry)
		return ERR_PTR(-EFAULT);

	/*
	 * Verify that the page has equal or higher build time
	 * permissions than the VMA permissions (i.e. the subset of {VM_READ,
	 * VM_WRITE, VM_EXEC} in vma->vm_flags).
	 */
	if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
		return ERR_PTR(-EFAULT);

	return __sgx_encl_load_page(encl, entry);
}

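/*
 * Variant of sgx_encl_load_page_in_vma() without the VMA permission check,
 * for callers that do not act on behalf of a specific mapping.
 */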
struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
					 unsigned long addr)
{
	struct sgx_encl_page *entry;

	entry = xa_load(&encl->page_array, PFN_DOWN(addr));
	if (!entry)
		return ERR_PTR(-EFAULT);

	return __sgx_encl_load_page(encl, entry);
}

/**
 * sgx_encl_eaug_page() - Dynamically add page to initialized enclave
 * @vma:	VMA obtained from fault info from where page is accessed
 * @encl:	enclave accessing the page
 * @addr:	address that triggered the page fault
 *
 * When an initialized enclave accesses a page with no backing EPC page
 * on an SGX2 system then an EPC page can be added dynamically via the
 * SGX2 ENCLS[EAUG] instruction.
 *
 * Returns: Appropriate vm_fault_t: VM_FAULT_NOPAGE when PTE was installed
 * successfully, VM_FAULT_SIGBUS or VM_FAULT_OOM as error otherwise.
 */
static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
				     struct sgx_encl *encl, unsigned long addr)
{
	vm_fault_t vmret = VM_FAULT_SIGBUS;
	struct sgx_pageinfo pginfo = {0};
	struct sgx_encl_page *encl_page;
	struct sgx_epc_page *epc_page;
	struct sgx_va_page *va_page;
	unsigned long phys_addr;
	u64 secinfo_flags;
	int ret;

	if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
		return VM_FAULT_SIGBUS;

	/*
	 * Ignore internal permission checking for dynamically added pages.
	 * They matter only for data added during the pre-initialization
	 * phase. The enclave decides the permissions by means of
	 * EACCEPT, EACCEPTCOPY and EMODPE.
	 */
	secinfo_flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X;
	encl_page = sgx_encl_page_alloc(encl, addr - encl->base, secinfo_flags);
	if (IS_ERR(encl_page))
		return VM_FAULT_OOM;

	mutex_lock(&encl->lock);

	epc_page = sgx_alloc_epc_page(encl_page, false);
	if (IS_ERR(epc_page)) {
		if (PTR_ERR(epc_page) == -EBUSY)
			vmret = VM_FAULT_NOPAGE;
		goto err_out_unlock;
	}

	va_page = sgx_encl_grow(encl, false);
	if (IS_ERR(va_page)) {
		if (PTR_ERR(va_page) == -EBUSY)
			vmret = VM_FAULT_NOPAGE;
		goto err_out_epc;
	}

	if (va_page)
		list_add(&va_page->list, &encl->va_pages);

	ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
			encl_page, GFP_KERNEL);
	/*
	 * If ret == -EBUSY then page was created in another flow while
	 * running without encl->lock.
	 */
	if (ret)
		goto err_out_shrink;

	pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
	pginfo.addr = encl_page->desc & PAGE_MASK;
	pginfo.metadata = 0;

	ret = __eaug(&pginfo, sgx_get_epc_virt_addr(epc_page));
	if (ret)
		goto err_out;

	encl_page->encl = encl;
	encl_page->epc_page = epc_page;
	encl_page->type = SGX_PAGE_TYPE_REG;
	encl->secs_child_cnt++;

	sgx_mark_page_reclaimable(encl_page->epc_page);

	phys_addr = sgx_get_epc_phys_addr(epc_page);
	/*
	 * Do not undo everything when creating PTE entry fails - next #PF
	 * would find page ready for a PTE.
	 */
	vmret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
	if (vmret != VM_FAULT_NOPAGE) {
		mutex_unlock(&encl->lock);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&encl->lock);
	return VM_FAULT_NOPAGE;

err_out:
	xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));

err_out_shrink:
	sgx_encl_shrink(encl, va_page);
err_out_epc:
	sgx_encl_free_epc_page(epc_page);
err_out_unlock:
	mutex_unlock(&encl->lock);
	kfree(encl_page);

	return vmret;
}

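/*
 * Page fault handler for enclave VMAs: load the faulting page back into the
 * EPC, or dynamically add it with EAUG on SGX2, and install the PTE.
 */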
static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->address;
	struct vm_area_struct *vma = vmf->vma;
	struct sgx_encl_page *entry;
	unsigned long phys_addr;
	struct sgx_encl *encl;
	vm_fault_t ret;

	encl = vma->vm_private_data;

	/*
	 * It's very unlikely but possible that allocating memory for the
	 * mm_list entry of a forked process failed in sgx_vma_open(). When
	 * this happens, vm_private_data is set to NULL.
	 */
	if (unlikely(!encl))
		return VM_FAULT_SIGBUS;

	/*
	 * The page_array keeps track of all enclave pages, whether they
	 * are swapped out or not. If there is no entry for this page and
	 * the system supports SGX2 then it is possible to dynamically add
	 * a new enclave page. This is only possible for an initialized
	 * enclave, which is checked right away.
	 */
	if (cpu_feature_enabled(X86_FEATURE_SGX2) &&
	    (!xa_load(&encl->page_array, PFN_DOWN(addr))))
		return sgx_encl_eaug_page(vma, encl, addr);

	mutex_lock(&encl->lock);

	entry = sgx_encl_load_page_in_vma(encl, addr, vma->vm_flags);
	if (IS_ERR(entry)) {
		mutex_unlock(&encl->lock);

		if (PTR_ERR(entry) == -EBUSY)
			return VM_FAULT_NOPAGE;

		return VM_FAULT_SIGBUS;
	}

	phys_addr = sgx_get_epc_phys_addr(entry->epc_page);

	ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
	if (ret != VM_FAULT_NOPAGE) {
		mutex_unlock(&encl->lock);

		return VM_FAULT_SIGBUS;
	}

	sgx_encl_test_and_clear_young(vma->vm_mm, entry);
	mutex_unlock(&encl->lock);

	return VM_FAULT_NOPAGE;
}

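/*
 * VMA open callback: register the VMA's mm (e.g. after fork()) on the
 * enclave's mm_list so that its mappings can be tracked and zapped.
 */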
static void sgx_vma_open(struct vm_area_struct *vma)
{
	struct sgx_encl *encl = vma->vm_private_data;

	/*
	 * It's possible but unlikely that vm_private_data is NULL. This can
	 * happen in a grandchild of a process, when sgx_encl_mm_add() failed
	 * to allocate memory in this callback.
	 */
	if (unlikely(!encl))
		return;

	if (sgx_encl_mm_add(encl, vma->vm_mm))
		vma->vm_private_data = NULL;
}

/**
 * sgx_encl_may_map() - Check if a requested VMA mapping is allowed
 * @encl:	an enclave pointer
 * @start:	lower bound of the address range, inclusive
 * @end:	upper bound of the address range, exclusive
 * @vm_flags:	VMA flags
 *
 * Iterate through the enclave pages contained within [@start, @end) to verify
 * that the permissions requested by a subset of {VM_READ, VM_WRITE, VM_EXEC}
 * do not contain any permissions that are not contained in the build time
 * permissions of any of the enclave pages within the given address range.
 *
 * An enclave creator must declare the strongest permissions that will be
 * needed for each enclave page. This ensures that mappings have identical or
 * weaker permissions than the permissions declared earlier.
 *
 * Return: 0 on success, -EACCES otherwise
 */
int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
		     unsigned long end, unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
	struct sgx_encl_page *page;
	unsigned long count = 0;
	int ret = 0;

	XA_STATE(xas, &encl->page_array, PFN_DOWN(start));

	/* Disallow mapping outside enclave's address range. */
	if (test_bit(SGX_ENCL_INITIALIZED, &encl->flags) &&
	    (start < encl->base || end > encl->base + encl->size))
		return -EACCES;

	/*
	 * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
	 * conflict with the enclave page permissions.
	 */
	if (current->personality & READ_IMPLIES_EXEC)
		return -EACCES;

	mutex_lock(&encl->lock);
	xas_lock(&xas);
	xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
		if (~page->vm_max_prot_bits & vm_prot_bits) {
			ret = -EACCES;
			break;
		}

		/* Reschedule on every XA_CHECK_SCHED iteration. */
		if (!(++count % XA_CHECK_SCHED)) {
			xas_pause(&xas);
			xas_unlock(&xas);
			mutex_unlock(&encl->lock);

			cond_resched();

			mutex_lock(&encl->lock);
			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);
	mutex_unlock(&encl->lock);

	return ret;
}

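/* mprotect() callback: defer the permission check to sgx_encl_may_map(). */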
static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, unsigned long newflags)
{
	return sgx_encl_may_map(vma->vm_private_data, start, end, newflags);
}

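/* Read one naturally aligned word from an enclave page with ENCLS[EDBGRD]. */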
static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page,
			       unsigned long addr, void *data)
{
	unsigned long offset = addr & ~PAGE_MASK;
	int ret;

	ret = __edbgrd(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
	if (ret)
		return -EIO;

	return 0;
}

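/* Write one naturally aligned word to an enclave page with ENCLS[EDBGWR]. */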
static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page,
				unsigned long addr, void *data)
{
	unsigned long offset = addr & ~PAGE_MASK;
	int ret;

	ret = __edbgwr(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
	if (ret)
		return -EIO;

	return 0;
}

/*
 * Load an enclave page to EPC if required, and take encl->lock.
 */
static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
						   unsigned long addr,
						   unsigned long vm_flags)
{
	struct sgx_encl_page *entry;

	for ( ; ; ) {
		mutex_lock(&encl->lock);

		entry = sgx_encl_load_page_in_vma(encl, addr, vm_flags);
		if (PTR_ERR(entry) != -EBUSY)
			break;

		mutex_unlock(&encl->lock);
	}

	if (IS_ERR(entry))
		mutex_unlock(&encl->lock);

	return entry;
}

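/*
 * ptrace() access to a debug enclave: copy @len bytes in or out word by
 * word with the EDBGRD/EDBGWR helpers above, faulting pages in as needed.
 */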
static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
			  void *buf, int len, int write)
{
	struct sgx_encl *encl = vma->vm_private_data;
	struct sgx_encl_page *entry = NULL;
	char data[sizeof(unsigned long)];
	unsigned long align;
	int offset;
	int cnt;
	int ret = 0;
	int i;

	/*
	 * If process was forked, VMA is still there but vm_private_data is set
	 * to NULL.
	 */
	if (!encl)
		return -EFAULT;

	if (!test_bit(SGX_ENCL_DEBUG, &encl->flags))
		return -EFAULT;

	for (i = 0; i < len; i += cnt) {
		entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK,
					      vma->vm_flags);
		if (IS_ERR(entry)) {
			ret = PTR_ERR(entry);
			break;
		}

		align = ALIGN_DOWN(addr + i, sizeof(unsigned long));
		offset = (addr + i) & (sizeof(unsigned long) - 1);
		cnt = sizeof(unsigned long) - offset;
		cnt = min(cnt, len - i);

		ret = sgx_encl_debug_read(encl, entry, align, data);
		if (ret)
			goto out;

		if (write) {
			memcpy(data + offset, buf + i, cnt);
			ret = sgx_encl_debug_write(encl, entry, align, data);
			if (ret)
				goto out;
		} else {
			memcpy(buf + i, data + offset, cnt);
		}

out:
		mutex_unlock(&encl->lock);

		if (ret)
			break;
	}

	return ret < 0 ? ret : i;
}

const struct vm_operations_struct sgx_vm_ops = {
	.fault = sgx_vma_fault,
	.mprotect = sgx_vma_mprotect,
	.open = sgx_vma_open,
	.access = sgx_vma_access,
};

/**
 * sgx_encl_release - Destroy an enclave instance
 * @ref:	address of a kref inside &sgx_encl
 *
 * Used together with kref_put(). Frees all the resources associated with the
 * enclave and the instance itself.
 */
void sgx_encl_release(struct kref *ref)
{
	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
	struct sgx_va_page *va_page;
	struct sgx_encl_page *entry;
	unsigned long index;

	xa_for_each(&encl->page_array, index, entry) {
		if (entry->epc_page) {
			/*
			 * The page and its radix tree entry cannot be freed
			 * if the page is being held by the reclaimer.
			 */
			if (sgx_unmark_page_reclaimable(entry->epc_page))
				continue;

			sgx_encl_free_epc_page(entry->epc_page);
			encl->secs_child_cnt--;
			entry->epc_page = NULL;
		}

		kfree(entry);
		/* Invoke scheduler to prevent soft lockups. */
		cond_resched();
	}

	xa_destroy(&encl->page_array);

	if (!encl->secs_child_cnt && encl->secs.epc_page) {
		sgx_encl_free_epc_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;
	}

	while (!list_empty(&encl->va_pages)) {
		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
					   list);
		list_del(&va_page->list);
		sgx_encl_free_epc_page(va_page->epc_page);
		kfree(va_page);
	}

	if (encl->backing)
		fput(encl->backing);

	cleanup_srcu_struct(&encl->srcu);

	WARN_ON_ONCE(!list_empty(&encl->mm_list));

	/* Detect EPC page leaks. */
	WARN_ON_ONCE(encl->secs_child_cnt);
	WARN_ON_ONCE(encl->secs.epc_page);

	kfree(encl);
}

/*
 * 'mm' is exiting and no longer needs mmu notifications.
 */
static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
	struct sgx_encl_mm *tmp = NULL;

	/*
	 * The enclave itself can remove encl_mm. Note, objects can't be moved
	 * off an RCU protected list, but deletion is ok.
	 */
	spin_lock(&encl_mm->encl->mm_lock);
	list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) {
		if (tmp == encl_mm) {
			list_del_rcu(&encl_mm->list);
			break;
		}
	}
	spin_unlock(&encl_mm->encl->mm_lock);

	if (tmp == encl_mm) {
		synchronize_srcu(&encl_mm->encl->srcu);
		mmu_notifier_put(mn);
	}
}

static void sgx_mmu_notifier_free(struct mmu_notifier *mn)
{
	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);

	/* 'encl_mm' is going away, put encl_mm->encl reference: */
	kref_put(&encl_mm->encl->refcount, sgx_encl_release);

	kfree(encl_mm);
}

static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
	.release		= sgx_mmu_notifier_release,
	.free_notifier		= sgx_mmu_notifier_free,
};

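/*
 * Walk the enclave's mm_list under SRCU and return the entry matching @mm,
 * or NULL if the mm has not been added to this enclave.
 */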
static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl,
					    struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm = NULL;
	struct sgx_encl_mm *tmp;
	int idx;

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(tmp, &encl->mm_list, list) {
		if (tmp->mm == mm) {
			encl_mm = tmp;
			break;
		}
	}

	srcu_read_unlock(&encl->srcu, idx);

	return encl_mm;
}

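/*
 * Add @mm to the enclave's mm_list and register an MMU notifier so that
 * the entry is cleaned up again when the mm exits.
 */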
int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm;
	int ret;

	/*
	 * Even though a single enclave may be mapped into an mm more than once,
	 * each 'mm' only appears once on encl->mm_list. This is guaranteed by
	 * holding the mm's mmap lock for write before an mm can be added to or
	 * removed from an encl->mm_list.
	 */
	mmap_assert_write_locked(mm);

	/*
	 * It's possible that an entry already exists in the mm_list, because it
	 * is removed only on VFS release or process exit.
	 */
	if (sgx_encl_find_mm(encl, mm))
		return 0;

	encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL);
	if (!encl_mm)
		return -ENOMEM;

	/* Grab a refcount for the encl_mm->encl reference: */
	kref_get(&encl->refcount);
	encl_mm->encl = encl;
	encl_mm->mm = mm;
	encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops;

	ret = __mmu_notifier_register(&encl_mm->mmu_notifier, mm);
	if (ret) {
		kfree(encl_mm);
		return ret;
	}

	spin_lock(&encl->mm_lock);
	list_add_rcu(&encl_mm->list, &encl->mm_list);
	/* Pairs with smp_rmb() in sgx_zap_enclave_ptes(). */
	smp_wmb();
	encl->mm_list_version++;
	spin_unlock(&encl->mm_lock);

	return 0;
}

/**
 * sgx_encl_cpumask() - Query which CPUs might be accessing the enclave
 * @encl: the enclave
 *
 * Some SGX functions require that no cached linear-to-physical address
 * mappings are present before they can succeed. For example, ENCLS[EWB]
 * copies a page from the enclave page cache to regular main memory but
 * it fails if it cannot ensure that there are no cached
 * linear-to-physical address mappings referring to the page.
 *
 * SGX hardware flushes all cached linear-to-physical mappings on a CPU
 * when an enclave is exited via ENCLU[EEXIT] or an Asynchronous Enclave
 * Exit (AEX). Exiting an enclave will thus ensure cached linear-to-physical
 * address mappings are cleared but coordination with the tracking done within
 * the SGX hardware is needed to support the SGX functions that depend on this
 * cache clearing.
 *
 * When the ENCLS[ETRACK] function is issued on an enclave, the hardware
 * tracks threads operating inside the enclave at that time. The SGX
 * hardware tracking requires that all the identified threads must have
 * exited the enclave in order to flush the mappings before a function such
 * as ENCLS[EWB] will be permitted.
 *
 * The following flow is used to support SGX functions that require that
 * no cached linear-to-physical address mappings are present:
 * 1) Execute ENCLS[ETRACK] to initiate hardware tracking.
 * 2) Use this function (sgx_encl_cpumask()) to query which CPUs might be
 *    accessing the enclave.
 * 3) Send IPI to identified CPUs, kicking them out of the enclave and
 *    thus flushing all locally cached linear-to-physical address mappings.
 * 4) Execute SGX function.
 *
 * Context: It is required to call this function after ENCLS[ETRACK].
 *          This will ensure that if any new mm appears (racing with
 *          sgx_encl_mm_add()) then the new mm will enter into the
 *          enclave with fresh linear-to-physical address mappings.
 *
 *          It is required that all IPIs are completed before a new
 *          ENCLS[ETRACK] is issued so be sure to protect steps 1 to 3
 *          of the above flow with the enclave's mutex.
 *
 * Return: cpumask of CPUs that might be accessing @encl
 */
const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl)
{
	cpumask_t *cpumask = &encl->cpumask;
	struct sgx_encl_mm *encl_mm;
	int idx;

	cpumask_clear(cpumask);

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));

		mmput_async(encl_mm->mm);
	}

	srcu_read_unlock(&encl->srcu, idx);

	return cpumask;
}

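/* Pin a single page of the enclave's shmem backing storage at @index. */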
static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
					      pgoff_t index)
{
	struct address_space *mapping = encl->backing->f_mapping;
	gfp_t gfpmask = mapping_gfp_mask(mapping);

	return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
}

/**
 * __sgx_encl_get_backing() - Pin the backing storage
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
 *
 * Pin the backing storage pages for storing the encrypted contents and Paging
 * Crypto MetaData (PCMD) of an enclave page.
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
static int __sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
				  struct sgx_backing *backing)
{
	pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
	struct page *contents;
	struct page *pcmd;

	contents = sgx_encl_get_backing_page(encl, page_index);
	if (IS_ERR(contents))
		return PTR_ERR(contents);

	pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off));
	if (IS_ERR(pcmd)) {
		put_page(contents);
		return PTR_ERR(pcmd);
	}

	backing->contents = contents;
	backing->pcmd = pcmd;
	backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);

	return 0;
}

/*
 * When called from ksgxd, returns the mem_cgroup of a struct mm stored
 * in the enclave's mm_list. When not called from ksgxd, just returns
 * the mem_cgroup of the current task.
 */
static struct mem_cgroup *sgx_encl_get_mem_cgroup(struct sgx_encl *encl)
{
	struct mem_cgroup *memcg = NULL;
	struct sgx_encl_mm *encl_mm;
	int idx;

	/*
	 * If called from normal task context, return the mem_cgroup
	 * of the current task's mm. The remainder of the handling is for
	 * ksgxd.
	 */
	if (!current_is_ksgxd())
		return get_mem_cgroup_from_mm(current->mm);

	/*
	 * Search the enclave's mm_list to find an mm associated with
	 * this enclave to charge the allocation to.
	 */
	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		memcg = get_mem_cgroup_from_mm(encl_mm->mm);

		mmput_async(encl_mm->mm);

		break;
	}

	srcu_read_unlock(&encl->srcu, idx);

	/*
	 * In the rare case that there isn't an mm associated with
	 * the enclave, set memcg to the current active mem_cgroup.
	 * This will be the root mem_cgroup if there is no active
	 * mem_cgroup.
	 */
	if (!memcg)
		return get_mem_cgroup_from_mm(NULL);

	return memcg;
}

/**
 * sgx_encl_alloc_backing() - create a new backing storage page
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
 *
 * When called from ksgxd, sets the active memcg from one of the
 * mms in the enclave's mm_list prior to any backing page allocation,
 * in order to ensure that shmem page allocations are charged to the
 * enclave. Create a backing page for loading data back into an EPC page with
 * ELDU. This function takes a reference on a new backing page which
 * must be dropped with a corresponding call to sgx_encl_put_backing().
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
			   struct sgx_backing *backing)
{
	struct mem_cgroup *encl_memcg = sgx_encl_get_mem_cgroup(encl);
	struct mem_cgroup *memcg = set_active_memcg(encl_memcg);
	int ret;

	ret = __sgx_encl_get_backing(encl, page_index, backing);

	set_active_memcg(memcg);
	mem_cgroup_put(encl_memcg);

	return ret;
}

/**
 * sgx_encl_lookup_backing() - retrieve an existing backing storage page
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
 *
 * Retrieve a backing page for loading data back into an EPC page with ELDU.
 * It is the caller's responsibility to ensure that it is appropriate to use
 * sgx_encl_lookup_backing() rather than sgx_encl_alloc_backing(). If lookup is
 * not used correctly, this will cause an allocation which is not accounted for.
 * This function takes a reference on an existing backing page which must be
 * dropped with a corresponding call to sgx_encl_put_backing().
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
				   struct sgx_backing *backing)
{
	return __sgx_encl_get_backing(encl, page_index, backing);
}

/**
 * sgx_encl_put_backing() - Unpin the backing storage
 * @backing:	data for accessing backing storage for the page
 */
void sgx_encl_put_backing(struct sgx_backing *backing)
{
	put_page(backing->pcmd);
	put_page(backing->contents);
}

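/*
 * apply_to_page_range() callback: test and clear the accessed bit of a
 * single PTE and return its old value. @data carries the mm_struct.
 */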
static int sgx_encl_test_and_clear_young_cb(pte_t *ptep, unsigned long addr,
					    void *data)
{
	pte_t pte;
	int ret;

	ret = pte_young(*ptep);
	if (ret) {
		pte = pte_mkold(*ptep);
		set_pte_at((struct mm_struct *)data, addr, ptep, pte);
	}

	return ret;
}

/**
 * sgx_encl_test_and_clear_young() - Test and reset the accessed bit
 * @mm:		mm_struct that is checked
 * @page:	enclave page to be tested for recent access
 *
 * Checks the Access (A) bit from the PTE corresponding to the enclave page and
 * clears it.
 *
 * Return: 1 if the page has been recently accessed and 0 if not.
 */
int sgx_encl_test_and_clear_young(struct mm_struct *mm,
				  struct sgx_encl_page *page)
{
	unsigned long addr = page->desc & PAGE_MASK;
	struct sgx_encl *encl = page->encl;
	struct vm_area_struct *vma;
	int ret;

	ret = sgx_encl_find(mm, addr, &vma);
	if (ret)
		return 0;

	if (encl != vma->vm_private_data)
		return 0;

	ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE,
				  sgx_encl_test_and_clear_young_cb, vma->vm_mm);
	if (ret < 0)
		return 0;

	return ret;
}

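/*
 * Allocate a struct sgx_encl_page for the enclave page at @offset, deriving
 * the maximal allowed VMA protection bits from the SECINFO permissions.
 */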
struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
					  unsigned long offset,
					  u64 secinfo_flags)
{
	struct sgx_encl_page *encl_page;
	unsigned long prot;

	encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
	if (!encl_page)
		return ERR_PTR(-ENOMEM);

	encl_page->desc = encl->base + offset;
	encl_page->encl = encl;

	prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ)  |
	       _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
	       _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);

	/*
	 * TCS pages must always have RW set for CPU access while the SECINFO
	 * permissions are *always* zero - the CPU ignores the user provided
	 * values and silently overwrites them with zero permissions.
	 */
	if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
		prot |= PROT_READ | PROT_WRITE;

	/* Calculate maximum of the VM flags for the page. */
	encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);

	return encl_page;
}

/**
 * sgx_zap_enclave_ptes() - remove PTEs mapping the address from enclave
 * @encl: the enclave
 * @addr: page aligned pointer to single page for which PTEs will be removed
 *
 * Multiple VMAs may have an enclave page mapped. Remove the PTE mapping
 * @addr from each VMA. Ensure that the page fault handler is ready to handle
 * new mappings of @addr before calling this function.
 */
void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr)
{
	unsigned long mm_list_version;
	struct sgx_encl_mm *encl_mm;
	struct vm_area_struct *vma;
	int idx, ret;

	do {
		mm_list_version = encl->mm_list_version;

		/* Pairs with smp_wmb() in sgx_encl_mm_add(). */
		smp_rmb();

		idx = srcu_read_lock(&encl->srcu);

		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
			if (!mmget_not_zero(encl_mm->mm))
				continue;

			mmap_read_lock(encl_mm->mm);

			ret = sgx_encl_find(encl_mm->mm, addr, &vma);
			if (!ret && encl == vma->vm_private_data)
				zap_vma_ptes(vma, addr, PAGE_SIZE);

			mmap_read_unlock(encl_mm->mm);

			mmput_async(encl_mm->mm);
		}

		srcu_read_unlock(&encl->srcu, idx);
	} while (unlikely(encl->mm_list_version != mm_list_version));
}

/**
 * sgx_alloc_va_page() - Allocate a Version Array (VA) page
 * @reclaim: Reclaim EPC pages directly if none available. Enclave
 *           mutex should not be held if this is set.
 *
 * Allocate a free EPC page and convert it to a Version Array (VA) page.
 *
 * Return:
 *   a VA page,
 *   -errno otherwise
 */
struct sgx_epc_page *sgx_alloc_va_page(bool reclaim)
{
	struct sgx_epc_page *epc_page;
	int ret;

	epc_page = sgx_alloc_epc_page(NULL, reclaim);
	if (IS_ERR(epc_page))
		return ERR_CAST(epc_page);

	ret = __epa(sgx_get_epc_virt_addr(epc_page));
	if (ret) {
		WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret);
		sgx_encl_free_epc_page(epc_page);
		return ERR_PTR(-EFAULT);
	}

	return epc_page;
}

/**
 * sgx_alloc_va_slot - allocate a VA slot
 * @va_page:	a &struct sgx_va_page instance
 *
 * Allocates a slot from a &struct sgx_va_page instance.
 *
 * Return: offset of the slot inside the VA page
 */
unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page)
{
	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);

	if (slot < SGX_VA_SLOT_COUNT)
		set_bit(slot, va_page->slots);

	return slot << 3;
}

/**
 * sgx_free_va_slot - free a VA slot
 * @va_page:	a &struct sgx_va_page instance
 * @offset:	offset of the slot inside the VA page
 *
 * Frees a slot from a &struct sgx_va_page instance.
 */
void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset)
{
	clear_bit(offset >> 3, va_page->slots);
}

/**
 * sgx_va_page_full - is the VA page full?
 * @va_page:	a &struct sgx_va_page instance
 *
 * Return: true if all slots have been taken
 */
bool sgx_va_page_full(struct sgx_va_page *va_page)
{
	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);

	return slot == SGX_VA_SLOT_COUNT;
}

/**
 * sgx_encl_free_epc_page - free an EPC page assigned to an enclave
 * @page:	EPC page to be freed
 *
 * Free an EPC page assigned to an enclave. It does EREMOVE for the page, and
 * only upon success does it put the page back on the free page list. Otherwise,
 * it gives a WARNING to indicate that the page is leaked.
 */
void sgx_encl_free_epc_page(struct sgx_epc_page *page)
{
	int ret;

	WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);

	ret = __eremove(sgx_get_epc_virt_addr(page));
	if (WARN_ONCE(ret, EREMOVE_ERROR_MESSAGE, ret, ret))
		return;

	sgx_free_epc_page(page);
}
1289