Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

vmpu: vmpu_buffer_access checks ACL #513

Closed
wants to merge 5 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 9 additions & 8 deletions core/system/src/ipc.c
Original file line number Diff line number Diff line change
Expand Up @@ -164,30 +164,31 @@ static int ipc_deliver(uvisor_ipc_t * send_ipc, uvisor_ipc_t * recv_ipc,

/* Check that an IPC descriptor pointer is non-NULL and that the whole
 * descriptor lies in memory the given box may both read and write.
 * Returns true when the access is permitted, false otherwise.
 * NOTE: the scraped diff left the pre-change 3-argument call interleaved
 * with the new 4-argument one; only the post-change call is kept here. */
static bool ipc_is_ok(int box_id, const uvisor_ipc_t * ipc) {
    return ipc &&
           vmpu_buffer_access_is_ok(box_id, ipc, sizeof(*ipc), UVISOR_TACL_UREAD | UVISOR_TACL_UWRITE);
}

/* Check that an IPC I/O slot array is non-NULL and fully contained in
 * memory the given box may both read and write. The send and receive
 * slot counts must match so one bound covers either array.
 * NOTE: diff residue removed — only the post-change 4-argument
 * vmpu_buffer_access_is_ok() call is kept. */
static bool ipc_io_array_is_ok(int box_id, const uvisor_ipc_io_t * array) {
    UVISOR_STATIC_ASSERT(UVISOR_IPC_SEND_SLOTS == UVISOR_IPC_RECV_SLOTS, UVISOR_IPC_SEND_SLOTS_should_be_equal_to_UVISOR_IPC_RECV_SLOTS);

    return array &&
           vmpu_buffer_access_is_ok(box_id, array, sizeof(*array) * UVISOR_IPC_SEND_SLOTS, UVISOR_TACL_UREAD | UVISOR_TACL_UWRITE);
}

/* Validate a pool queue owned by the given box: the queue header, the
 * pool descriptor it points to, and the backing slot array must all be
 * readable and writable by that box. Each pointer is NULL-checked before
 * the dereference that follows it in the && chain.
 * NOTE: diff residue removed — only the post-change 4-argument
 * vmpu_buffer_access_is_ok() calls are kept. */
static bool pool_queue_is_ok(int box_id, const uvisor_pool_queue_t * queue) {
    return queue &&
           vmpu_buffer_access_is_ok(box_id, queue, sizeof(*queue), UVISOR_TACL_UREAD | UVISOR_TACL_UWRITE) &&
           queue->pool &&
           vmpu_buffer_access_is_ok(box_id, queue->pool, sizeof(*queue->pool), UVISOR_TACL_UREAD | UVISOR_TACL_UWRITE) &&
           vmpu_buffer_access_is_ok(box_id, queue->pool->array, queue->pool->stride * queue->pool->num, UVISOR_TACL_UREAD | UVISOR_TACL_UWRITE);
}

/* Validate a single IPC I/O entry: the entry itself, its descriptor, and
 * the message buffer of desc->len bytes must all be readable and writable
 * by the given box. NULL checks precede each dereference in the chain.
 * NOTE: diff residue removed — only the post-change 4-argument
 * vmpu_buffer_access_is_ok() calls are kept. */
static bool ipc_io_is_ok(int box_id, const uvisor_ipc_io_t * io) {
    return io &&
           vmpu_buffer_access_is_ok(box_id, io, sizeof(*io), UVISOR_TACL_UREAD | UVISOR_TACL_UWRITE) &&
           io->desc &&
           vmpu_buffer_access_is_ok(box_id, io->desc, sizeof(*(io->desc)), UVISOR_TACL_UREAD | UVISOR_TACL_UWRITE) &&
           /* FIXME: Split check for send/recv */
           vmpu_buffer_access_is_ok(box_id, io->msg, io->desc->len, UVISOR_TACL_UREAD | UVISOR_TACL_UWRITE);
}

void ipc_drain_queue(void)
Expand Down
3 changes: 2 additions & 1 deletion core/vmpu/inc/vmpu_mpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -81,11 +81,12 @@ MpuRegion * vmpu_region_find_for_address(uint8_t box_id, uint32_t address);
* @param box_id the box id to look up the buffer in
* @param addr the buffer start address
* @param size the size of the buffer in bytes
* @param acl the ACL of access required
* @returns true if the buffer is contained within the set of memory a
* box could access, even if a fault and recovery is required
* to access the buffer; false otherwise.
* */
bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size, UvisorBoxAcl acl);

/* Region management */

Expand Down
18 changes: 10 additions & 8 deletions core/vmpu/src/mpu_armv7m/vmpu_armv7m_mpu.c
Original file line number Diff line number Diff line change
Expand Up @@ -330,7 +330,7 @@ MpuRegion * vmpu_region_find_for_address(uint8_t box_id, uint32_t address)

/* Region management */

static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_addr)
static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_addr, UvisorBoxAcl acl)
{
/* NOTE: Buffers are not allowed to span more than 1 region. If they do
* span more than one region, access will be denied. */
Expand All @@ -350,11 +350,12 @@ static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_ad
uint32_t size = (1UL << (((rasr & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos) + 1UL));
uint32_t start = rbar & ~(size - 1);
uint32_t end = start + size;
uint32_t config = vmpu_map_acl(acl);
/* Check entire region if no subregion is disabled. */
if (!(rasr & MPU_RASR_SRD_Msk)) {
/* Test that the buffer is fully contained in the region. */
if (vmpu_value_in_range(start, end, start_addr) && vmpu_value_in_range(start, end, end_addr)) {
return true;
return (config & rasr) == config;
}
} else {
/* Check each subregion separately. */
Expand All @@ -366,7 +367,7 @@ static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_ad
uint32_t sub_end = sub_start + sub_size;
/* Test that the buffer is fully contained in the region. */
if (vmpu_value_in_range(sub_start, sub_end, start_addr) && vmpu_value_in_range(sub_start, sub_end, end_addr)) {
return true;
return (config & rasr) == config;
}
}
}
Expand All @@ -376,7 +377,7 @@ static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_ad
return false;
}

bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size)
bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size, UvisorBoxAcl acl)
{
uint32_t start_addr = (uint32_t) addr;
uint32_t end_addr = start_addr + size - 1;
Expand All @@ -393,20 +394,21 @@ bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size)
if (box_id != 0) {
/* Check the public box as well as the specified box, since public box
* memories are accessible by all boxes. */
if (vmpu_buffer_access_is_ok(0, addr, size)) {
if (vmpu_buffer_access_is_ok(0, addr, size, acl)) {
return true;
}
} else {
/* Check static regions. */
if (vmpu_buffer_access_is_ok_static(start_addr, end_addr)) {
if (vmpu_buffer_access_is_ok_static(start_addr, end_addr, acl)) {
return true;
}
}

/* Check if addr range lies in page heap. */
int error = page_allocator_check_range_for_box(box_id, start_addr, end_addr);
if (error == UVISOR_ERROR_PAGE_OK) {
return true;
/* Check heap ACL */
return (acl & UVISOR_TACLDEF_STACK) == acl;
} else if (error != UVISOR_ERROR_PAGE_INVALID_PAGE_ORIGIN) {
return false;
}
Expand All @@ -419,7 +421,7 @@ bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size)

/* If the end address is also within the region, and the region is NS
* accessible, then access to the buffer is OK. */
return vmpu_value_in_range(region->start, region->end, end_addr);
return vmpu_value_in_range(region->start, region->end, end_addr) && (region->acl) == acl;
}

/* MPU access */
Expand Down
15 changes: 9 additions & 6 deletions core/vmpu/src/mpu_armv8m/vmpu_armv8m_mpu.c
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ static bool vmpu_region_is_ns(uint32_t rlar)
(rlar & SAU_RLAR_ENABLE_Msk) == SAU_RLAR_ENABLE_Msk;
}

static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_addr)
static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_addr, UvisorBoxAcl acl)
{
/* NOTE: Buffers are not allowed to span more than 1 region. If they do
* span more than one region, access will be denied. */
Expand All @@ -244,14 +244,16 @@ static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_ad

/* Test that the buffer is fully contained in the region. */
if (vmpu_value_in_range(start, end, start_addr) && vmpu_value_in_range(start, end, end_addr)) {
/* FIXME: Check ACL.
* Original ACL is not stored here, but here needs a check */
return true;
}
}

return false;
}

bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size)
bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size, UvisorBoxAcl acl)
{
uint32_t start_addr = (uint32_t) UVISOR_GET_NS_ALIAS(addr);
uint32_t end_addr = start_addr + size - 1;
Expand All @@ -268,20 +270,21 @@ bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size)
if (box_id != 0) {
/* Check the public box as well as the specified box, since public box
* memories are accessible by all boxes. */
if (vmpu_buffer_access_is_ok(0, addr, size)) {
if (vmpu_buffer_access_is_ok(0, addr, size, acl)) {
return true;
}
} else {
/* Check static regions. */
if (vmpu_buffer_access_is_ok_static(start_addr, end_addr)) {
if (vmpu_buffer_access_is_ok_static(start_addr, end_addr, acl)) {
return true;
}
}

/* Check if addr range lies in page heap. */
int error = page_allocator_check_range_for_box(box_id, start_addr, end_addr);
if (error == UVISOR_ERROR_PAGE_OK) {
return true;
/* Check heap ACL */
return (acl & UVISOR_TACLDEF_STACK) == acl;
} else if (error != UVISOR_ERROR_PAGE_INVALID_PAGE_ORIGIN) {
return false;
}
Expand All @@ -295,7 +298,7 @@ bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size)
/* If the end address is also within the region, and the region is NS
* accessible, then access to the buffer is OK. */
return vmpu_value_in_range(region->start, region->end, end_addr) &&
vmpu_region_is_ns(region->config);
vmpu_region_is_ns(region->config) && (region->acl) == acl;
}

/* SAU access */
Expand Down
15 changes: 8 additions & 7 deletions core/vmpu/src/mpu_kinetis/vmpu_kinetis_mpu.c
Original file line number Diff line number Diff line change
Expand Up @@ -305,7 +305,7 @@ MpuRegion * vmpu_region_find_for_address(uint8_t box_id, uint32_t address)

/* Region management */

static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_addr)
static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_addr, UvisorBoxAcl acl)
{
/* NOTE: Buffers are not allowed to span more than 1 region. If they do
* span more than one region, access will be denied. */
Expand All @@ -319,14 +319,15 @@ static bool vmpu_buffer_access_is_ok_static(uint32_t start_addr, uint32_t end_ad

/* Test that the buffer is fully contained in the region. */
if (vmpu_value_in_range(start, end, start_addr) && vmpu_value_in_range(start, end, end_addr)) {
/* FIXME: Check ACL here */
return true;
}
}

return false;
}

bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size)
bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size, UvisorBoxAcl acl)
{
uint32_t start_addr = (uint32_t) addr;
uint32_t end_addr = start_addr + size - 1;
Expand All @@ -343,25 +344,25 @@ bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size)
if (box_id != 0) {
/* Check the public box as well as the specified box, since public box
* memories are accessible by all boxes. */
if (vmpu_buffer_access_is_ok(0, addr, size)) {
if (vmpu_buffer_access_is_ok(0, addr, size, acl)) {
return true;
}
} else {
/* Check static regions. */
if (vmpu_buffer_access_is_ok_static(start_addr, end_addr)) {
if (vmpu_buffer_access_is_ok_static(start_addr, end_addr, acl)) {
return true;
}
}

/* Check if addr range lies in AIPS */
if (vmpu_fault_find_acl_aips(box_id, start_addr, size)) {
return true;
return (UVISOR_TACLDEF_PERIPH & acl) == acl;
}

/* Check if addr range lies in page heap. */
int error = page_allocator_check_range_for_box(box_id, start_addr, end_addr);
if (error == UVISOR_ERROR_PAGE_OK) {
return true;
return (UVISOR_TACLDEF_STACK & acl) == acl;
} else if (error != UVISOR_ERROR_PAGE_INVALID_PAGE_ORIGIN) {
return false;
}
Expand All @@ -374,7 +375,7 @@ bool vmpu_buffer_access_is_ok(int box_id, const void * addr, size_t size)

/* If the end address is also within the region, and the region is NS
* accessible, then access to the buffer is OK. */
return vmpu_value_in_range(region->start, region->end, end_addr);
return vmpu_value_in_range(region->start, region->end, end_addr) && (region->acl & acl) == acl;
}

/* MPU access */
Expand Down