diff --git a/keystone-ioctl.c b/keystone-ioctl.c
index 2d97f6a25..453f50199 100644
--- a/keystone-ioctl.c
+++ b/keystone-ioctl.c
@@ -21,6 +21,10 @@ int keystone_create_enclave(struct file *filep, unsigned long arg)
     return -ENOMEM;
   }
 
+  /* Pass base page table */
+  enclp->pt_ptr = __pa(enclave->epm->root_page_table);
+  enclp->size = enclave->epm->size;
+
   /* allocate UID */
   enclp->eid = enclave_idr_alloc(enclave);
 
@@ -62,7 +66,7 @@ int keystone_finalize_enclave(unsigned long arg)
   // physical addresses for runtime, user, and freemem
   create_args.runtime_paddr = epm_va_to_pa(enclave->epm, enclp->runtime_vaddr);
   create_args.user_paddr = epm_va_to_pa(enclave->epm, enclp->user_vaddr);
-  create_args.free_paddr = epm_get_free_pa(enclave->epm);
+  create_args.free_paddr = enclp->free_ptr;
 
   create_args.params = enclp->params;
 
@@ -75,11 +79,6 @@ int keystone_finalize_enclave(unsigned long arg)
     goto error_destroy_enclave;
   }
 
-  /* We cleanup the free lists here since the kernel will no longer be
-     managing them, they are part of the enclave now. */
-  utm_clean_free_list(utm);
-  epm_clean_free_list(enclave->epm);
-
   return 0;
 
 error_destroy_enclave:
@@ -116,79 +115,6 @@ int keystone_run_enclave(unsigned long arg)
   return ret;
 }
 
-int keystone_add_page(unsigned long arg)
-{
-  int ret = 0;
-  vaddr_t epm_page;
-  struct addr_packed *addr = (struct addr_packed *) arg;
-  unsigned long ueid = addr->eid;
-  unsigned int mode = addr->mode;
-  struct enclave *enclave;
-
-  enclave = get_enclave_by_id(ueid);
-
-  if(!enclave) {
-    keystone_err("invalid enclave id\n");
-    return -EINVAL;
-  }
-
-  switch (mode) {
-    case USER_NOEXEC: {
-      epm_alloc_user_page_noexec(enclave->epm, addr->va);
-      break;
-    }
-    case RT_NOEXEC: {
-      epm_alloc_rt_page_noexec(enclave->epm, addr->va);
-      break;
-    }
-    case RT_FULL: {
-      epm_page = epm_alloc_rt_page(enclave->epm, addr->va);
-      if (copy_from_user((void *) epm_page, (void *) addr->copied, PAGE_SIZE) != 0)
-        ret = -ENOEXEC;
-      break;
-    }
-    case USER_FULL: {
-      epm_page = epm_alloc_user_page(enclave->epm, addr->va);
-      if (copy_from_user((void *) epm_page, (void *) addr->copied, PAGE_SIZE) != 0)
-        ret = -ENOEXEC;
-      break;
-    }
-    default:
-      ret = -ENOSYS;
-  }
-
-  return ret;
-}
-
-/* This IOCTL allows user to prepare page tables prior to the actual page allocation.
- * This is needed when an enclave requires linear physical layout.
- * The user must call this before allocating pages */
-int keystone_alloc_vspace(unsigned long arg)
-{
-  int ret = 0;
-  vaddr_t va;
-  size_t num_pages;
-  struct enclave* enclave;
-  struct keystone_ioctl_alloc_vspace* enclp = (struct keystone_ioctl_alloc_vspace *) arg;
-
-  va = enclp->vaddr;
-  num_pages = PAGE_UP(enclp->size)/PAGE_SIZE;
-
-  enclave = get_enclave_by_id(enclp->eid);
-
-  if(!enclave) {
-    keystone_err("invalid enclave id\n");
-    return -EINVAL;
-  }
-
-  if (epm_alloc_vspace(enclave->epm, va, num_pages) != num_pages) {
-    keystone_err("failed to allocate vspace\n");
-    return -ENOMEM;
-  }
-
-  return ret;
-}
-
 int utm_init_ioctl(struct file *filp, unsigned long arg)
 {
   int ret = 0;
@@ -215,24 +141,7 @@ int utm_init_ioctl(struct file *filp, unsigned long arg)
   /* prepare for mmap */
   enclave->utm = utm;
 
-  return ret;
-}
-
-int utm_alloc(unsigned long arg)
-{
-  int ret = 0;
-  struct enclave *enclave;
-  struct addr_packed *addr = (struct addr_packed *) arg;
-  unsigned long ueid = addr->eid;
-
-  enclave = get_enclave_by_id(ueid);
-
-  if(!enclave) {
-    keystone_err("invalid enclave id\n");
-    return -EINVAL;
-  }
-
-  utm_alloc_page(enclave->utm, enclave->epm, addr->va, PTE_D | PTE_A | PTE_R | PTE_W);
+  enclp->utm_free_ptr = __pa(utm->ptr);
 
   return ret;
 }
@@ -299,7 +208,7 @@ int keystone_resume_enclave(unsigned long arg)
 long keystone_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 {
   long ret;
-  char data[256];
+  char data[272];
   size_t ioc_size;
 
   if (!arg)
@@ -315,12 +224,6 @@ long keystone_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
     case KEYSTONE_IOC_CREATE_ENCLAVE:
       ret = keystone_create_enclave(filep, (unsigned long) data);
       break;
-    case KEYSTONE_IOC_ADD_PAGE:
-      ret = keystone_add_page((unsigned long) data);
-      break;
-    case KEYSTONE_IOC_ALLOC_VSPACE:
-      ret = keystone_alloc_vspace((unsigned long) data);
-      break;
     case KEYSTONE_IOC_FINALIZE_ENCLAVE:
       ret = keystone_finalize_enclave((unsigned long) data);
       break;
@@ -337,9 +240,6 @@ long keystone_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
   * However, there was a weird bug in the compiler that generated wrong control flow
   * that ended up with an illegal instruction when we combined switch-case and if statements.
  * We haven't identified the exact problem, so we'll keep these until we figure it out. */
-    case KEYSTONE_IOC_UTM_ALLOC:
-      ret = utm_alloc((unsigned long) data);
-      break;
     case KEYSTONE_IOC_UTM_INIT:
       ret = utm_init_ioctl(filep, (unsigned long) data);
       break;
diff --git a/keystone-page.c b/keystone-page.c
index b826a0b2b..6f690ef55 100644
--- a/keystone-page.c
+++ b/keystone-page.c
@@ -7,49 +7,9 @@
 #include "keystone.h"
 #include <linux/dma-mapping.h>
 
-void init_free_pages(struct list_head* pg_list, vaddr_t ptr, unsigned int count)
-{
-  unsigned int i;
-  vaddr_t cur;
-  cur = ptr;
-  for(i=0; i<count; i++)
-  {
-    put_free_page(pg_list, cur);
-    cur += PAGE_SIZE;
-  }
-}
-
-vaddr_t get_free_page(struct list_head* pg_list)
-{
-  struct free_page* page;
-  vaddr_t addr;
-
-  if(list_empty(pg_list))
-    return 0;
-
-  page = list_first_entry(pg_list, struct free_page, freelist);
-  addr = page->vaddr;
-  list_del(&page->freelist);
-  kfree(page);
-
-  return addr;
-}
-
-void put_free_page(struct list_head* pg_list, vaddr_t page_addr)
-{
-  struct free_page* page = kmalloc(sizeof(struct free_page),GFP_KERNEL);
-  page->vaddr = page_addr;
-  list_add_tail(&page->freelist, pg_list);
-  return;
-}
-
 /* Destroy all memory associated with an EPM */
 int epm_destroy(struct epm* epm) {
 
-  /* Clean anything in the free list */
-  epm_clean_free_list(epm);
-
   if(!epm->ptr || !epm->size)
     return 0;
 
@@ -76,9 +36,6 @@ int epm_init(struct epm* epm, unsigned int min_pages)
   unsigned long count = min_pages;
   phys_addr_t device_phys_addr = 0;
 
-  /* Always init the head */
-  INIT_LIST_HEAD(&epm->epm_free_list);
-
   /* try to allocate contiguous memory */
   epm->is_cma = 0;
   order = ilog2(min_pages - 1) + 1;
@@ -111,15 +68,8 @@ int epm_init(struct epm* epm, unsigned int min_pages)
   /* zero out */
   memset((void*)epm_vaddr, 0, PAGE_SIZE*count);
 
-  init_free_pages(&epm->epm_free_list, epm_vaddr, count);
-
-  /* The first free page will be the enclave's top-level page table */
-  t = (pte_t*) get_free_page(&epm->epm_free_list);
-  if (!t) {
-    return -ENOMEM;
-  }
-
-  epm->root_page_table = t;
+  epm->root_page_table = (pte_t*) epm_vaddr;
   epm->pa = __pa(epm_vaddr);
   epm->order = order;
   epm->size = count << PAGE_SHIFT;
@@ -128,25 +78,8 @@ int epm_init(struct epm* epm, unsigned int min_pages)
   return 0;
 }
 
-int epm_clean_free_list(struct epm* epm)
-{
-  struct free_page* page;
-  struct list_head* pg_list;
-  pg_list = &epm->epm_free_list;
-  while (!list_empty(&epm->epm_free_list))
-  {
-    page = list_first_entry(pg_list, struct free_page, freelist);
-    list_del(&page->freelist);
-    kfree(page);
-  }
-  return 0;
-}
-
 int utm_destroy(struct utm* utm){
 
-  /* Clean anything in the free list */
-  utm_clean_free_list(utm);
-
   if(utm->ptr != NULL){
     free_pages((vaddr_t)utm->ptr, utm->order);
   }
@@ -154,20 +87,6 @@ int utm_destroy(struct utm* utm){
   return 0;
 }
 
-int utm_clean_free_list(struct utm* utm)
-{
-  struct free_page* page;
-  struct list_head* pg_list;
-  pg_list = &utm->utm_free_list;
-  while (!list_empty(&utm->utm_free_list))
-  {
-    page = list_first_entry(pg_list, struct free_page, freelist);
-    list_del(&page->freelist);
-    kfree(page);
-  }
-  return 0;
-}
-
 int utm_init(struct utm* utm, size_t untrusted_size)
 {
   unsigned long req_pages = 0;
@@ -192,8 +111,6 @@ int utm_init(struct utm* utm, size_t untrusted_size)
     keystone_warn("shared buffer size is not multiple of PAGE_SIZE\n");
   }
 
-  INIT_LIST_HEAD(&utm->utm_free_list);
-  init_free_pages(&utm->utm_free_list, (vaddr_t)utm->ptr, utm->size/PAGE_SIZE);
 
   return 0;
 }
@@ -213,16 +130,6 @@ static size_t pt_idx(vaddr_t addr, int level)
   return idx & ((1 << RISCV_PGLEVEL_BITS) - 1);
 }
 
-static pte_t* __ept_walk_create(struct list_head* pg_list, pte_t* root_page_table, vaddr_t addr);
-
-static pte_t* __ept_continue_walk_create(struct list_head* pg_list, pte_t* root_page_table, vaddr_t addr, pte_t* pte)
-{
-  unsigned long free_ppn = ppn(get_free_page(pg_list));
-  *pte = ptd_create(free_ppn);
-  //pr_info("ptd_create: ppn = %u, pte = 0x%lx\n", free_ppn, *pte);
-  return __ept_walk_create(pg_list, root_page_table, addr);
-}
-
 static pte_t* __ept_walk_internal(struct list_head* pg_list, pte_t* root_page_table, vaddr_t addr, int create)
 {
   pte_t* t = root_page_table;
@@ -230,9 +137,11 @@ static pte_t* __ept_walk_internal(struct list_head* pg_list, pte_t* root_page_ta
   int i;
   for (i = (VA_BITS - RISCV_PGSHIFT) / RISCV_PGLEVEL_BITS - 1; i > 0; i--) {
     size_t idx = pt_idx(addr, i);
-    //pr_info(" level %d: pt_idx %d (%x)\n", i, idx, idx);
+    /* Since page management is done on the user side, the PTE must
+     * already be valid here; otherwise the VA is invalid.
+     */
     if (unlikely(!(pte_val(t[idx]) & PTE_V)))
-      return create ? __ept_continue_walk_create(pg_list, root_page_table, addr, &t[idx]) : 0;
+      return 0;
     t = (pte_t*) __va(pte_ppn(t[idx]) << RISCV_PGSHIFT);
   }
   return &t[pt_idx(addr, 0)];
@@ -243,33 +152,6 @@ static pte_t* __ept_walk(struct list_head* pg_list, pte_t* root_page_table, vadd
 {
   return __ept_walk_internal(pg_list, root_page_table, addr, 0);
 }
 
-static pte_t* __ept_walk_create(struct list_head* pg_list, pte_t* root_page_table, vaddr_t addr)
-{
-  return __ept_walk_internal(pg_list, root_page_table, addr, 1);
-}
-
-/*
-static int __ept_va_avail(struct epm* epm, vaddr_t vaddr)
-{
-  pte_t* pte = __ept_walk(epm, vaddr);
-  return pte == 0 || pte_val(*pte) == 0;
-}
-*/
-
-paddr_t epm_get_free_pa(struct epm* epm)
-{
-  struct free_page* page;
-  struct list_head* pg_list;
-
-  pg_list = &(epm->epm_free_list);
-
-  if(list_empty(pg_list))
-    return 0;
-
-  page = list_first_entry(pg_list, struct free_page, freelist);
-  return __pa(page->vaddr);
-}
-
 paddr_t epm_va_to_pa(struct epm* epm, vaddr_t addr)
 {
   pte_t* pte = __ept_walk(NULL, epm->root_page_table,addr);
@@ -278,73 +160,3 @@
   else
     return 0;
 }
-
-/* This function pre-allocates the required page tables so that
- * the virtual addresses are linearly mapped to the physical memory */
-size_t epm_alloc_vspace(struct epm* epm, vaddr_t addr, size_t num_pages)
-{
-  vaddr_t walk;
-  size_t count;
-
-  for(walk=addr, count=0; count < num_pages; count++, addr += PAGE_SIZE)
-  {
-    pte_t* pte = __ept_walk_create(&epm->epm_free_list, epm->root_page_table, addr);
-    if(!pte)
-      break;
-  }
-
-  return count;
-}
-
-
-vaddr_t utm_alloc_page(struct utm* utm, struct epm* epm, vaddr_t addr, unsigned long flags)
-{
-  vaddr_t page_addr;
-  pte_t* pte = __ept_walk_create(&epm->epm_free_list, epm->root_page_table, addr);
-
-  /* if the page has been already allocated, return the page */
-  if(pte_val(*pte) & PTE_V) {
-    return (vaddr_t) __va(pte_ppn(*pte) << RISCV_PGSHIFT);
-  }
-
-  /* otherwise, allocate one from UTM freelist */
-  page_addr = get_free_page(&utm->utm_free_list);
-  *pte = pte_create(ppn(page_addr), flags | PTE_V);
-  return page_addr;
-}
-
-vaddr_t epm_alloc_page(struct epm* epm, vaddr_t addr, unsigned long flags)
-{
-  vaddr_t page_addr;
-  pte_t* pte = __ept_walk_create(&epm->epm_free_list, epm->root_page_table, addr);
-
-  /* if the page has been already allocated, return the page */
-  if(pte_val(*pte) & PTE_V) {
-    return (vaddr_t) __va(pte_ppn(*pte) << RISCV_PGSHIFT);
-  }
-
-  /* otherwise, allocate one from EPM freelist */
-  page_addr = get_free_page(&epm->epm_free_list);
-  *pte = pte_create(ppn(page_addr), flags | PTE_V);
-  return page_addr;
-}
-
-vaddr_t epm_alloc_rt_page_noexec(struct epm* epm, vaddr_t addr)
-{
-  return epm_alloc_page(epm, addr, PTE_D | PTE_A | PTE_R | PTE_W);
-}
-
-vaddr_t epm_alloc_rt_page(struct epm* epm, vaddr_t addr)
-{
-  return epm_alloc_page(epm, addr, PTE_D | PTE_A | PTE_R | PTE_W | PTE_X);
-}
-
-vaddr_t epm_alloc_user_page_noexec(struct epm* epm, vaddr_t addr)
-{
-  return epm_alloc_page(epm, addr, PTE_D | PTE_A | PTE_R | PTE_W | PTE_U);
-}
-
-vaddr_t epm_alloc_user_page(struct epm* epm, vaddr_t addr)
-{
-  return epm_alloc_page(epm, addr, PTE_D | PTE_A | PTE_R | PTE_X | PTE_W | PTE_U);
-}
diff --git a/keystone.h b/keystone.h
index eade0b42e..1b8b4640b 100644
--- a/keystone.h
+++ b/keystone.h
@@ -62,14 +62,8 @@ long keystone_ioctl(struct file* filep, unsigned int cmd, unsigned long arg);
 int keystone_release(struct inode *inode, struct file *file);
 int keystone_mmap(struct file *filp, struct vm_area_struct *vma);
 
-struct free_page {
-  vaddr_t vaddr;
-  struct list_head freelist;
-};
-
 /* enclave private memory */
 struct epm {
-  struct list_head epm_free_list;
   pte_t* root_page_table;
   vaddr_t ptr;
   size_t size;
@@ -79,7 +73,6 @@ struct epm {
 };
 
 struct utm {
-  struct list_head utm_free_list;
   pte_t* root_page_table;
   void* ptr;
   size_t size;
@@ -118,25 +111,12 @@ struct enclave* get_enclave_by_id(unsigned int ueid);
 static inline uintptr_t epm_satp(struct epm* epm) {
   return ((uintptr_t)epm->root_page_table >> RISCV_PGSHIFT | SATP_MODE_CHOICE);
 }
 
-void init_free_pages(struct list_head* pg_list, vaddr_t base, unsigned int count);
-void put_free_page(struct list_head* pg_list, vaddr_t page_addr);
-vaddr_t get_free_page(struct list_head* pg_list);
 int epm_destroy(struct epm* epm);
 int epm_init(struct epm* epm, unsigned int count);
 int utm_destroy(struct utm* utm);
 int utm_init(struct utm* utm, size_t untrusted_size);
-int epm_clean_free_list(struct epm* epm);
-int utm_clean_free_list(struct utm* utm);
 paddr_t epm_va_to_pa(struct epm* epm, vaddr_t addr);
-paddr_t epm_get_free_pa(struct epm* epm);
-vaddr_t utm_alloc_page(struct utm* utm, struct epm* epm, vaddr_t addr, unsigned long flags);
-size_t epm_alloc_vspace(struct epm* epm, vaddr_t addr, size_t num_pages);
-vaddr_t epm_alloc_rt_page(struct epm* epm, vaddr_t addr);
-vaddr_t epm_alloc_rt_page_noexec(struct epm* epm, vaddr_t addr);
-vaddr_t epm_alloc_user_page(struct epm* epm, vaddr_t addr);
-vaddr_t epm_alloc_user_page_noexec(struct epm* epm, vaddr_t addr);
-void epm_free_page(struct epm* epm, vaddr_t addr);
 
 unsigned long calculate_required_pages(
diff --git a/keystone_user.h b/keystone_user.h
index 7da2d902d..9eff218df 100644
--- a/keystone_user.h
+++ b/keystone_user.h
@@ -19,16 +19,10 @@
   _IOR(KEYSTONE_IOC_MAGIC, 0x04, struct keystone_ioctl_run_enclave)
 #define KEYSTONE_IOC_RESUME_ENCLAVE \
   _IOR(KEYSTONE_IOC_MAGIC, 0x05, struct keystone_ioctl_run_enclave)
-#define KEYSTONE_IOC_ADD_PAGE \
-  _IOR(KEYSTONE_IOC_MAGIC, 0x06, struct addr_packed)
 #define KEYSTONE_IOC_FINALIZE_ENCLAVE \
-  _IOR(KEYSTONE_IOC_MAGIC, 0x07, struct keystone_ioctl_create_enclave)
-#define KEYSTONE_IOC_UTM_ALLOC \
-  _IOR(KEYSTONE_IOC_MAGIC, 0x08, struct addr_packed)
+  _IOR(KEYSTONE_IOC_MAGIC, 0x06, struct keystone_ioctl_create_enclave)
 #define KEYSTONE_IOC_UTM_INIT \
-  _IOR(KEYSTONE_IOC_MAGIC, 0x09, struct keystone_ioctl_create_enclave)
-#define KEYSTONE_IOC_ALLOC_VSPACE \
-  _IOR(KEYSTONE_IOC_MAGIC, 0x0a, struct keystone_ioctl_alloc_vspace)
+  _IOR(KEYSTONE_IOC_MAGIC, 0x07, struct keystone_ioctl_create_enclave)
 
 #define RT_NOEXEC 0
 #define USER_NOEXEC 1
@@ -54,6 +48,10 @@ struct keystone_ioctl_create_enclave {
   __u64 runtime_vaddr;
   __u64 user_vaddr;
 
+  __u64 pt_ptr;
+  __u64 free_ptr;
+  __u64 utm_free_ptr;
+  __u64 size;
   // Runtime Parameters
   struct runtime_params_t params;
 };
 
@@ -66,17 +64,4 @@
   __u64 ret;
 };
 
-struct addr_packed {
-  __u64 va;
-  __u64 copied;
-  __u64 eid;
-  __u64 mode;
-};
-
-struct keystone_ioctl_alloc_vspace {
-  __u64 eid;
-  __u64 vaddr;
-  __u64 size;
-};
-
 #endif
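
With ADD_PAGE, ALLOC_VSPACE, and UTM_ALLOC gone, the loader is expected to mmap the EPM and build the enclave layout itself, then hand the driver the resulting addresses. The following is a minimal user-space sketch of that flow, not part of the patch: the /dev/keystone_enclave device path, the example virtual addresses, the free_ptr value, and the assumption that enclave sizing travels in fields of the struct not shown in this diff are all illustrative placeholders.

/* sketch.c - a minimal sketch, assuming the structures from
 * keystone_user.h above and a /dev/keystone_enclave device node. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "keystone_user.h"

int main(void)
{
  struct keystone_ioctl_create_enclave enclp;
  void *epm;
  int fd;

  memset(&enclp, 0, sizeof(enclp));
  /* Enclave sizing and runtime parameters (fields not shown in this
   * diff) are assumed to be filled in here. */

  fd = open("/dev/keystone_enclave", O_RDWR);
  if (fd < 0) { perror("open"); return 1; }

  /* CREATE now reports the root page table PA (pt_ptr) and the EPM
   * size back to user space instead of keeping a kernel free list. */
  if (ioctl(fd, KEYSTONE_IOC_CREATE_ENCLAVE, &enclp) < 0) {
    perror("create"); goto fail;
  }

  /* Map the EPM and lay it out from user space: copy the runtime and
   * eapp binaries in and build the page tables rooted at pt_ptr.
   * That construction (formerly ADD_PAGE/ALLOC_VSPACE) is elided. */
  epm = mmap(NULL, enclp.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (epm == MAP_FAILED) { perror("mmap"); goto fail; }

  enclp.runtime_vaddr = 0xffffffffc0000000ULL; /* placeholder layout */
  enclp.user_vaddr    = 0x10000ULL;            /* placeholder layout */
  enclp.free_ptr      = enclp.pt_ptr;          /* placeholder: PA of the
                                                  first unused EPM page */

  /* UTM_INIT hands back the shared buffer's PA in utm_free_ptr. */
  if (ioctl(fd, KEYSTONE_IOC_UTM_INIT, &enclp) < 0) {
    perror("utm_init"); goto fail;
  }

  /* FINALIZE translates runtime_vaddr/user_vaddr to PAs, passes
   * free_ptr through as free_paddr, and lets the security monitor
   * measure the EPM laid out above. */
  if (ioctl(fd, KEYSTONE_IOC_FINALIZE_ENCLAVE, &enclp) < 0) {
    perror("finalize"); goto fail;
  }

  close(fd);
  return 0;

fail:
  close(fd);
  return 1;
}

Note that the data[] staging buffer in keystone_ioctl grows from 256 to 272 bytes, presumably so the enlarged keystone_ioctl_create_enclave still fits; any further fields added to that struct must keep the buffer in sync.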