diff --git a/README.md b/README.md
index a5676a6da..949b8f2e5 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,16 @@
 # Keystone Enclave Driver
 
-This is a loadable kernel module for Keystone Enclave.
-To build the module, make with a proper KDIR path.
-
-```
-make KDIR=
-```
+This is a loadable kernel module for Keystone Enclave. To build the
+module, make with the top-level
+[Keystone](https://github.com/keystone-enclave/keystone) build
+process.
 
 # Compatibility
 
+The driver will always work correctly with the version of riscv-linux
+pointed to by the top-level
+[Keystone](https://github.com/keystone-enclave/keystone) repository.
+
 For the upstream linux, loadable modules for RISC-V only work on kernel versions later than 4.17.
 To use the module in 4.15, please use this version
diff --git a/keystone-enclave.c b/keystone-enclave.c
index 86d7b1b9b..2e02aa19f 100644
--- a/keystone-enclave.c
+++ b/keystone-enclave.c
@@ -4,7 +4,7 @@
 //------------------------------------------------------------------------------
 #include
 #include "keystone.h"
-/* idr for enclave UID to enclave_t */
+/* idr for enclave UID to struct enclave */
 DEFINE_IDR(idr_enclave);
 DEFINE_SPINLOCK(idr_enclave_lock);
 
@@ -31,10 +31,10 @@ unsigned long calculate_required_pages(
 }
 
 /* Smart destroy, handles partial initialization of epm and utm etc */
-int destroy_enclave(enclave_t* enclave)
+int destroy_enclave(struct enclave* enclave)
 {
-  epm_t* epm;
-  utm_t* utm;
+  struct epm* epm;
+  struct utm* utm;
 
   if (enclave == NULL)
     return -ENOSYS;
@@ -55,19 +55,20 @@ int destroy_enclave(enclave_t* enclave)
   return 0;
 }
 
-enclave_t* create_enclave(unsigned long min_pages)
+struct enclave* create_enclave(unsigned long min_pages)
 {
-  enclave_t* enclave;
+  struct enclave* enclave;
 
-  enclave = kmalloc(sizeof(enclave_t), GFP_KERNEL);
+  enclave = kmalloc(sizeof(struct enclave), GFP_KERNEL);
   if (!enclave){
     keystone_err("failed to allocate enclave struct\n");
     goto error_no_free;
   }
 
   enclave->utm = NULL;
+  enclave->close_on_pexit = 1;
 
-  enclave->epm = kmalloc(sizeof(epm_t), GFP_KERNEL);
+  enclave->epm = kmalloc(sizeof(struct epm), GFP_KERNEL);
 
   if (!enclave->epm)
   {
     keystone_err("failed to allocate epm\n");
@@ -86,7 +87,7 @@ enclave_t* create_enclave(unsigned long min_pages)
   return NULL;
 }
 
-unsigned int enclave_idr_alloc(enclave_t* enclave)
+unsigned int enclave_idr_alloc(struct enclave* enclave)
 {
   unsigned int ueid;
 
@@ -102,18 +103,18 @@ unsigned int enclave_idr_alloc(enclave_t* enclave)
   return ueid;
 }
 
-enclave_t* enclave_idr_remove(unsigned int ueid)
+struct enclave* enclave_idr_remove(unsigned int ueid)
 {
-  enclave_t* enclave;
+  struct enclave* enclave;
   spin_lock_bh(&idr_enclave_lock);
   enclave = idr_remove(&idr_enclave, ueid);
   spin_unlock_bh(&idr_enclave_lock);
   return enclave;
 }
 
-enclave_t* get_enclave_by_id(unsigned int ueid)
+struct enclave* get_enclave_by_id(unsigned int ueid)
 {
-  enclave_t* enclave;
+  struct enclave* enclave;
   spin_lock_bh(&idr_enclave_lock);
   enclave = idr_find(&idr_enclave, ueid);
   spin_unlock_bh(&idr_enclave_lock);
diff --git a/keystone-ioctl.c b/keystone-ioctl.c
index d85f3dbbe..2d97f6a25 100644
--- a/keystone-ioctl.c
+++ b/keystone-ioctl.c
@@ -7,12 +7,14 @@
 #include "keystone_user.h"
 #include
 
-int keystone_create_enclave(unsigned long arg)
+int __keystone_destroy_enclave(unsigned int ueid);
+
+int keystone_create_enclave(struct file *filep, unsigned long arg)
 {
   /* create parameters */
   struct keystone_ioctl_create_enclave *enclp = (struct keystone_ioctl_create_enclave *) arg;
-  enclave_t *enclave;
+  struct enclave *enclave;
 
   enclave = create_enclave(enclp->min_pages);
 
   if (enclave == NULL) {
@@ -22,6 +24,8 @@ int keystone_create_enclave(unsigned long arg)
   /* allocate UID */
   enclp->eid = enclave_idr_alloc(enclave);
 
+  filep->private_data = (void *) enclp->eid;
+
   return 0;
 }
 
@@ -29,14 +33,13 @@ int keystone_create_enclave(unsigned long arg)
 
 int keystone_finalize_enclave(unsigned long arg)
 {
   int ret;
-  enclave_t *enclave;
-  struct utm_t *utm;
+  struct enclave *enclave;
+  struct utm *utm;
   struct keystone_sbi_create_t create_args;
   struct keystone_ioctl_create_enclave *enclp = (struct keystone_ioctl_create_enclave *) arg;
 
   enclave = get_enclave_by_id(enclp->eid);
-
   if(!enclave) {
     keystone_err("invalid enclave id\n");
     return -EINVAL;
@@ -63,7 +66,7 @@ int keystone_finalize_enclave(unsigned long arg)
 
   create_args.params = enclp->params;
 
-  // SM will write the eid to enclave_t.eid
+  // SM will write the eid to struct enclave.eid
   create_args.eid_pptr = (unsigned int *) __pa(&enclave->eid);
 
   ret = SBI_CALL_1(SBI_SM_CREATE_ENCLAVE, __pa(&create_args));
@@ -91,7 +94,7 @@ int keystone_run_enclave(unsigned long arg)
 {
   int ret = 0;
   unsigned long ueid;
-  enclave_t* enclave;
+  struct enclave* enclave;
   struct keystone_ioctl_run_enclave *run = (struct keystone_ioctl_run_enclave*) arg;
 
   ueid = run->eid;
@@ -120,7 +123,7 @@ int keystone_add_page(unsigned long arg)
   struct addr_packed *addr = (struct addr_packed *) arg;
   unsigned long ueid = addr->eid;
   unsigned int mode = addr->mode;
-  enclave_t *enclave;
+  struct enclave *enclave;
 
   enclave = get_enclave_by_id(ueid);
 
@@ -165,7 +168,7 @@ int keystone_alloc_vspace(unsigned long arg)
   int ret = 0;
   vaddr_t va;
   size_t num_pages;
-  enclave_t* enclave;
+  struct enclave* enclave;
   struct keystone_ioctl_alloc_vspace* enclp = (struct keystone_ioctl_alloc_vspace *) arg;
 
   va = enclp->vaddr;
@@ -189,8 +192,8 @@ int keystone_alloc_vspace(unsigned long arg)
 
 int utm_init_ioctl(struct file *filp, unsigned long arg)
 {
   int ret = 0;
-  struct utm_t *utm;
-  enclave_t *enclave;
+  struct utm *utm;
+  struct enclave *enclave;
   struct keystone_ioctl_create_enclave *enclp = (struct keystone_ioctl_create_enclave *) arg;
   long long unsigned untrusted_size = enclp->params.untrusted_size;
@@ -201,7 +204,7 @@ int utm_init_ioctl(struct file *filp, unsigned long arg)
     return -EINVAL;
   }
 
-  utm = kmalloc(sizeof(struct utm_t), GFP_KERNEL);
+  utm = kmalloc(sizeof(struct utm), GFP_KERNEL);
   if (!utm) {
     ret = -ENOMEM;
     return ret;
@@ -210,7 +213,6 @@ int utm_init_ioctl(struct file *filp, unsigned long arg)
   ret = utm_init(utm, untrusted_size);
 
   /* prepare for mmap */
-  filp->private_data = utm;
   enclave->utm = utm;
 
   return ret;
@@ -219,7 +221,7 @@ int utm_init_ioctl(struct file *filp, unsigned long arg)
 
 int utm_alloc(unsigned long arg)
 {
   int ret = 0;
-  enclave_t *enclave;
+  struct enclave *enclave;
   struct addr_packed *addr = (struct addr_packed *) arg;
   unsigned long ueid = addr->eid;
@@ -236,20 +238,29 @@ int utm_alloc(unsigned long arg)
 }
 
-int keystone_destroy_enclave(unsigned long arg)
+int keystone_destroy_enclave(struct file *filep, unsigned long arg)
 {
   int ret;
-  enclave_t *enclave;
   struct keystone_ioctl_create_enclave *enclp = (struct keystone_ioctl_create_enclave *) arg;
 
   unsigned long ueid = enclp->eid;
 
+  ret = __keystone_destroy_enclave(ueid);
+  if (!ret) {
+    filep->private_data = NULL;
+  }
+  return ret;
+}
+
+int __keystone_destroy_enclave(unsigned int ueid)
+{
+  int ret;
+  struct enclave *enclave;
   enclave = get_enclave_by_id(ueid);
 
   if (!enclave) {
     keystone_err("invalid enclave id\n");
     return -EINVAL;
   }
-
   ret = SBI_CALL_1(SBI_SM_DESTROY_ENCLAVE, enclave->eid);
   if (ret) {
     keystone_err("fatal: cannot destroy enclave: SBI failed\n");
@@ -267,7 +278,7 @@ int keystone_resume_enclave(unsigned long arg)
   int ret = 0;
   struct keystone_ioctl_run_enclave *resume = (struct keystone_ioctl_run_enclave*) arg;
   unsigned long ueid = resume->eid;
-  enclave_t* enclave;
+  struct enclave* enclave;
 
   enclave = get_enclave_by_id(ueid);
   if (!enclave)
@@ -302,7 +313,7 @@ long keystone_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 
   switch (cmd) {
     case KEYSTONE_IOC_CREATE_ENCLAVE:
-      ret = keystone_create_enclave((unsigned long) data);
+      ret = keystone_create_enclave(filep, (unsigned long) data);
       break;
     case KEYSTONE_IOC_ADD_PAGE:
       ret = keystone_add_page((unsigned long) data);
@@ -314,7 +325,7 @@ long keystone_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
       ret = keystone_finalize_enclave((unsigned long) data);
       break;
     case KEYSTONE_IOC_DESTROY_ENCLAVE:
-      ret = keystone_destroy_enclave((unsigned long) data);
+      ret = keystone_destroy_enclave(filep, (unsigned long) data);
       break;
     case KEYSTONE_IOC_RUN_ENCLAVE:
       ret = keystone_run_enclave((unsigned long) data);
@@ -341,3 +352,21 @@ long keystone_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 
   return ret;
 }
+
+int keystone_release(struct inode *inode, struct file *file) {
+  unsigned long ueid = (unsigned long)(file->private_data);
+
+  /* pr_info("Releasing enclave: %d\n", ueid); */
+
+  /* We need to send destroy enclave just the eid to close. */
+  struct enclave *enclave = get_enclave_by_id(ueid);
+
+  if (!enclave) {
+    /* If eid is set to the invalid id, then we do not do anything. */
+    return -EINVAL;
+  }
+  if (enclave->close_on_pexit) {
+    return __keystone_destroy_enclave(ueid);
+  }
+  return 0;
+}
diff --git a/keystone-page.c b/keystone-page.c
index d7199f5a7..b826a0b2b 100644
--- a/keystone-page.c
+++ b/keystone-page.c
@@ -22,13 +22,13 @@ void init_free_pages(struct list_head* pg_list, vaddr_t ptr, unsigned int count)
 
 vaddr_t get_free_page(struct list_head* pg_list)
 {
-  struct free_page_t* page;
+  struct free_page* page;
   vaddr_t addr;
 
   if(list_empty(pg_list))
     return 0;
 
-  page = list_first_entry(pg_list, struct free_page_t, freelist);
+  page = list_first_entry(pg_list, struct free_page, freelist);
   addr = page->vaddr;
   list_del(&page->freelist);
   kfree(page);
@@ -38,14 +38,14 @@ vaddr_t get_free_page(struct list_head* pg_list)
 
 void put_free_page(struct list_head* pg_list, vaddr_t page_addr)
 {
-  struct free_page_t* page = kmalloc(sizeof(struct free_page_t),GFP_KERNEL);
+  struct free_page* page = kmalloc(sizeof(struct free_page),GFP_KERNEL);
   page->vaddr = page_addr;
   list_add_tail(&page->freelist, pg_list);
   return;
 }
 
 /* Destroy all memory associated with an EPM */
-int epm_destroy(epm_t* epm) {
+int epm_destroy(struct epm* epm) {
 
   /* Clean anything in the free list */
   epm_clean_free_list(epm);
@@ -67,7 +67,7 @@ int epm_destroy(epm_t* epm) {
 }
 
 /* Create an EPM and initialize the free list */
-int epm_init(epm_t* epm, unsigned int min_pages)
+int epm_init(struct epm* epm, unsigned int min_pages)
 {
   pte_t* t;
 
@@ -128,21 +128,21 @@ int epm_init(epm_t* epm, unsigned int min_pages)
   return 0;
 }
 
-int epm_clean_free_list(epm_t* epm)
+int epm_clean_free_list(struct epm* epm)
 {
-  struct free_page_t* page;
+  struct free_page* page;
   struct list_head* pg_list;
   pg_list = &epm->epm_free_list;
 
   while (!list_empty(&epm->epm_free_list)) {
-    page = list_first_entry(pg_list, struct free_page_t, freelist);
+    page = list_first_entry(pg_list, struct free_page, freelist);
     list_del(&page->freelist);
     kfree(page);
   }
   return 0;
 }
 
-int utm_destroy(utm_t* utm){
+int utm_destroy(struct utm* utm){
 
   /* Clean anything in the free list */
   utm_clean_free_list(utm);
@@ -154,21 +154,21 @@ int utm_destroy(utm_t* utm){
   return 0;
 }
 
-int utm_clean_free_list(utm_t* utm)
+int utm_clean_free_list(struct utm* utm)
 {
-  struct free_page_t* page;
+  struct free_page* page;
   struct list_head* pg_list;
   pg_list = &utm->utm_free_list;
 
   while (!list_empty(&utm->utm_free_list)) {
-    page = list_first_entry(pg_list, struct free_page_t, freelist);
+    page = list_first_entry(pg_list, struct free_page, freelist);
     list_del(&page->freelist);
     kfree(page);
   }
   return 0;
 }
 
-int utm_init(utm_t* utm, size_t untrusted_size)
+int utm_init(struct utm* utm, size_t untrusted_size)
 {
   unsigned long req_pages = 0;
   unsigned long order = 0;
@@ -249,29 +249,28 @@ static pte_t* __ept_walk_create(struct list_head* pg_list, pte_t* root_page_tabl
 }
 
 /*
-static int __ept_va_avail(epm_t* epm, vaddr_t vaddr)
+static int __ept_va_avail(struct epm* epm, vaddr_t vaddr)
 {
   pte_t* pte = __ept_walk(epm, vaddr);
   return pte == 0 || pte_val(*pte) == 0;
 }
 */
 
-paddr_t epm_get_free_pa(epm_t* epm)
+paddr_t epm_get_free_pa(struct epm* epm)
 {
-  struct free_page_t* page;
+  struct free_page* page;
   struct list_head* pg_list;
-  paddr_t addr;
 
   pg_list = &(epm->epm_free_list);
   if(list_empty(pg_list))
     return 0;
 
-  page = list_first_entry(pg_list, struct free_page_t, freelist);
+  page = list_first_entry(pg_list, struct free_page, freelist);
 
   return __pa(page->vaddr);
 }
 
-paddr_t epm_va_to_pa(epm_t* epm, vaddr_t addr)
+paddr_t epm_va_to_pa(struct epm* epm, vaddr_t addr)
 {
   pte_t* pte = __ept_walk(NULL, epm->root_page_table,addr);
   if(pte)
@@ -282,7 +281,7 @@ paddr_t epm_va_to_pa(epm_t* epm, vaddr_t addr)
 /* This function pre-allocates the required page tables so that
  * the virtual addresses are linearly mapped to the physical memory */
-size_t epm_alloc_vspace(epm_t* epm, vaddr_t addr, size_t num_pages)
+size_t epm_alloc_vspace(struct epm* epm, vaddr_t addr, size_t num_pages)
 {
   vaddr_t walk;
   size_t count;
 
@@ -298,7 +297,7 @@ size_t epm_alloc_vspace(epm_t* epm, vaddr_t addr, size_t num_pages)
 }
 
-vaddr_t utm_alloc_page(utm_t* utm, epm_t* epm, vaddr_t addr, unsigned long flags)
+vaddr_t utm_alloc_page(struct utm* utm, struct epm* epm, vaddr_t addr, unsigned long flags)
 {
   vaddr_t page_addr;
   pte_t* pte = __ept_walk_create(&epm->epm_free_list, epm->root_page_table, addr);
@@ -314,7 +313,7 @@ vaddr_t utm_alloc_page(utm_t* utm, epm_t* epm, vaddr_t addr, unsigned long flags
   return page_addr;
 }
 
-vaddr_t epm_alloc_page(epm_t* epm, vaddr_t addr, unsigned long flags)
+vaddr_t epm_alloc_page(struct epm* epm, vaddr_t addr, unsigned long flags)
 {
   vaddr_t page_addr;
   pte_t* pte = __ept_walk_create(&epm->epm_free_list, epm->root_page_table, addr);
@@ -330,22 +329,22 @@ vaddr_t epm_alloc_page(epm_t* epm, vaddr_t addr, unsigned long flags)
   return page_addr;
 }
 
-vaddr_t epm_alloc_rt_page_noexec(epm_t* epm, vaddr_t addr)
+vaddr_t epm_alloc_rt_page_noexec(struct epm* epm, vaddr_t addr)
 {
   return epm_alloc_page(epm, addr, PTE_D | PTE_A | PTE_R | PTE_W);
 }
 
-vaddr_t epm_alloc_rt_page(epm_t* epm, vaddr_t addr)
+vaddr_t epm_alloc_rt_page(struct epm* epm, vaddr_t addr)
 {
   return epm_alloc_page(epm, addr, PTE_D | PTE_A | PTE_R | PTE_W | PTE_X);
 }
 
-vaddr_t epm_alloc_user_page_noexec(epm_t* epm, vaddr_t addr)
+vaddr_t epm_alloc_user_page_noexec(struct epm* epm, vaddr_t addr)
 {
   return epm_alloc_page(epm, addr, PTE_D | PTE_A | PTE_R | PTE_W | PTE_U);
 }
 
-vaddr_t epm_alloc_user_page(epm_t* epm, vaddr_t addr)
+vaddr_t epm_alloc_user_page(struct epm* epm, vaddr_t addr)
 {
   return epm_alloc_page(epm, addr, PTE_D | PTE_A | PTE_R | PTE_X | PTE_W | PTE_U);
 }
diff --git a/keystone.c b/keystone.c
index caf92ebb6..88322ed0d 100644
--- a/keystone.c
+++ b/keystone.c
@@ -24,7 +24,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 static const struct file_operations keystone_fops = {
   .owner = THIS_MODULE,
   .mmap = keystone_mmap,
-  .unlocked_ioctl = keystone_ioctl
+  .unlocked_ioctl = keystone_ioctl,
+  .release = keystone_release
 };
 
 struct miscdevice keystone_dev = {
@@ -40,11 +41,18 @@ void keystone_handle_interrupts(void)
   csr_set(sstatus, SR_SIE);
   csr_write(sstatus, old);
 }
+
 int keystone_mmap(struct file* filp, struct vm_area_struct *vma)
 {
-  struct utm_t* utm;
+  struct utm* utm;
+  struct enclave* enclave;
   unsigned long vsize, psize;
-  utm = filp->private_data;
+
+  enclave = get_enclave_by_id((unsigned long) filp->private_data);
+  if(!enclave) {
+    keystone_err("invalid enclave id\n");
+    return -EINVAL;
+  }
+  utm = enclave->utm;
   vsize = vma->vm_end - vma->vm_start;
   psize = utm->size;
   if (vsize > psize)
diff --git a/keystone.h b/keystone.h
index 61af25282..eade0b42e 100644
--- a/keystone.h
+++ b/keystone.h
@@ -59,15 +59,16 @@ extern struct miscdevice keystone_dev;
 void keystone_handle_interrupts(void);
 
 long keystone_ioctl(struct file* filep, unsigned int cmd, unsigned long arg);
+int keystone_release(struct inode *inode, struct file *file);
 int keystone_mmap(struct file *filp, struct vm_area_struct *vma);
 
-struct free_page_t {
+struct free_page {
   vaddr_t vaddr;
   struct list_head freelist;
 };
 
 /* enclave private memory */
-typedef struct epm_t {
+struct epm {
   struct list_head epm_free_list;
   pte_t* root_page_table;
   vaddr_t ptr;
@@ -75,66 +76,67 @@ extern struct miscdevice keystone_dev;
   unsigned long order;
   paddr_t pa;
   bool is_cma;
-} epm_t;
+};
 
-typedef struct utm_t {
+struct utm {
   struct list_head utm_free_list;
   pte_t* root_page_table;
   void* ptr;
   size_t size;
   unsigned long order;
-} utm_t;
+};
 
-typedef struct keystone_enclave_t
+struct enclave
 {
   unsigned int eid;
-  struct utm_t* utm;
-  struct epm_t* epm;
-} enclave_t;
+  int close_on_pexit;
+  struct utm* utm;
+  struct epm* epm;
+};
 
 // global debug functions
 void debug_dump(char* ptr, unsigned long size);
 
 // runtime/app loader
-int keystone_rtld_init_runtime(enclave_t* enclave, void* __user rt_ptr, size_t rt_sz, unsigned long rt_stack_sz, unsigned long* rt_offset);
+int keystone_rtld_init_runtime(struct enclave* enclave, void* __user rt_ptr, size_t rt_sz, unsigned long rt_stack_sz, unsigned long* rt_offset);
 
-int keystone_rtld_init_app(enclave_t* enclave, void* __user app_ptr, size_t app_sz, size_t app_stack_sz, unsigned long stack_offset);
+int keystone_rtld_init_app(struct enclave* enclave, void* __user app_ptr, size_t app_sz, size_t app_stack_sz, unsigned long stack_offset);
 
 // untrusted memory mapper
-int keystone_rtld_init_untrusted(enclave_t* enclave, void* untrusted_ptr, size_t untrusted_size);
+int keystone_rtld_init_untrusted(struct enclave* enclave, void* untrusted_ptr, size_t untrusted_size);
 
-enclave_t* get_enclave_by_id(unsigned int ueid);
-enclave_t* create_enclave(unsigned long min_pages);
-int destroy_enclave(enclave_t* enclave);
+struct enclave* get_enclave_by_id(unsigned int ueid);
+struct enclave* create_enclave(unsigned long min_pages);
+int destroy_enclave(struct enclave* enclave);
 
-unsigned int enclave_idr_alloc(enclave_t* enclave);
-enclave_t* enclave_idr_remove(unsigned int ueid);
-enclave_t* get_enclave_by_id(unsigned int ueid);
+unsigned int enclave_idr_alloc(struct enclave* enclave);
+struct enclave* enclave_idr_remove(unsigned int ueid);
+struct enclave* get_enclave_by_id(unsigned int ueid);
 
-static inline uintptr_t epm_satp(epm_t* epm) {
+static inline uintptr_t epm_satp(struct epm* epm) {
   return ((uintptr_t)epm->root_page_table >> RISCV_PGSHIFT | SATP_MODE_CHOICE);
 }
 
 void init_free_pages(struct list_head* pg_list, vaddr_t base, unsigned int count);
 void put_free_page(struct list_head* pg_list, vaddr_t page_addr);
 vaddr_t get_free_page(struct list_head* pg_list);
-int epm_destroy(epm_t* epm);
-int epm_init(epm_t* epm, unsigned int count);
-int utm_destroy(utm_t* utm);
-int utm_init(utm_t* utm, size_t untrusted_size);
-int epm_clean_free_list(epm_t* epm);
-int utm_clean_free_list(utm_t* utm);
-paddr_t epm_va_to_pa(epm_t* epm, vaddr_t addr);
-paddr_t epm_get_free_pa(epm_t* epm);
-vaddr_t utm_alloc_page(utm_t* utm, epm_t* epm, vaddr_t addr, unsigned long flags);
-size_t epm_alloc_vspace(epm_t* epm, vaddr_t addr, size_t num_pages);
-vaddr_t epm_alloc_rt_page(epm_t* epm, vaddr_t addr);
-vaddr_t epm_alloc_rt_page_noexec(epm_t* epm, vaddr_t addr);
-vaddr_t epm_alloc_user_page(epm_t* epm, vaddr_t addr);
-vaddr_t epm_alloc_user_page_noexec(epm_t* epm, vaddr_t addr);
-void epm_free_page(epm_t* epm, vaddr_t addr);
+int epm_destroy(struct epm* epm);
+int epm_init(struct epm* epm, unsigned int count);
+int utm_destroy(struct utm* utm);
+int utm_init(struct utm* utm, size_t untrusted_size);
+int epm_clean_free_list(struct epm* epm);
+int utm_clean_free_list(struct utm* utm);
+paddr_t epm_va_to_pa(struct epm* epm, vaddr_t addr);
+paddr_t epm_get_free_pa(struct epm* epm);
+vaddr_t utm_alloc_page(struct utm* utm, struct epm* epm, vaddr_t addr, unsigned long flags);
+size_t epm_alloc_vspace(struct epm* epm, vaddr_t addr, size_t num_pages);
+vaddr_t epm_alloc_rt_page(struct epm* epm, vaddr_t addr);
+vaddr_t epm_alloc_rt_page_noexec(struct epm* epm, vaddr_t addr);
+vaddr_t epm_alloc_user_page(struct epm* epm, vaddr_t addr);
+vaddr_t epm_alloc_user_page_noexec(struct epm* epm, vaddr_t addr);
+void epm_free_page(struct epm* epm, vaddr_t addr);
 
 unsigned long calculate_required_pages(