Update uVisor page allocator

pull/2558/head
Milosch Meriac 2016-08-26 09:39:17 +00:00
parent 170ba1c802
commit 9cb6d6099e
16 changed files with 106 additions and 53 deletions

View File

@ -1,7 +1,7 @@
552 Milosch Meriac
457 Alessandro Angelino
553 Milosch Meriac
458 Alessandro Angelino
42 Niklas Hauser
40 Jaeden Amero
31 Niklas Hauser
3 Hugo Vincent
3 JaredCJR
3 Jim Huang

View File

@ -1 +1 @@
v0.20.1-alpha
v0.21.0-alpha

View File

@ -18,6 +18,7 @@
#define __UVISOR_API_BOX_CONFIG_H__
#include "api/inc/uvisor_exports.h"
#include "api/inc/page_allocator_exports.h"
#include <stddef.h>
#include <stdint.h>
@ -53,6 +54,14 @@ UVISOR_EXTERN const uint32_t __uvisor_mode;
\
extern const __attribute__((section(".keep.uvisor.cfgtbl_ptr_first"), aligned(4))) void * const main_cfg_ptr = &main_cfg;
/* Creates a global page heap with at least `minimum_number_of_pages` each of size `page_size` in bytes.
* The total page heap size is at least `minimum_number_of_pages * page_size`. */
#define UVISOR_SET_PAGE_HEAP(page_size, minimum_number_of_pages) \
const uint32_t __uvisor_page_size = (page_size); \
uint8_t __attribute__((section(".keep.uvisor.page_heap"))) \
main_page_heap_reserved[ (page_size) * (minimum_number_of_pages) ]
/* This macro selects an overloaded macro (variable number of arguments). */
#define __UVISOR_BOX_MACRO(_1, _2, _3, _4, NAME, ...) NAME

View File

@ -34,4 +34,7 @@ UVISOR_EXTERN int uvisor_page_malloc(UvisorPageTable * const table);
*/
UVISOR_EXTERN int uvisor_page_free(const UvisorPageTable * const table);
/* @returns the active page size for one page. */
UVISOR_EXTERN uint32_t uvisor_get_page_size(void);
#endif /* __UVISOR_API_PAGE_ALLOCATOR_H__ */

View File

@ -30,27 +30,9 @@
#define UVISOR_ERROR_PAGE_INVALID_PAGE_OWNER (UVISOR_ERROR_CLASS_PAGE + 5)
#define UVISOR_ERROR_PAGE_INVALID_PAGE_COUNT (UVISOR_ERROR_CLASS_PAGE + 6)
/* Must be a power of 2 for MPU alignment in ARMv7-M with ARM MPU.
* Must be multiple of 32 for K64F MPU. */
#ifndef UVISOR_PAGE_SIZE
#define UVISOR_PAGE_SIZE ((uint32_t) 16 * 1024)
#endif
/* Return the rounded up number of pages required to hold `size`. */
#define UVISOR_PAGES_FOR_SIZE(size) ((size + UVISOR_PAGE_SIZE - 1) / UVISOR_PAGE_SIZE)
/* Create a page table with `count` many entries. */
#define UVISOR_PAGE_TABLE(count) \
struct { \
uint32_t page_size; \
uint32_t page_count; \
void * page_origins[count]; \
}
/* Create a page table with enough pages to hold `size`. */
#define UVISOR_PAGE_TABLE_FOR_SIZE(size) UVISOR_PAGE_TABLE(UVISOR_PAGES_FOR_SIZE(size))
/* Contains the uVisor page size.
* @warning Do not read directly, instead use `uvisor_get_page_size()` accessor! */
UVISOR_EXTERN const uint32_t __uvisor_page_size;
typedef struct {
uint32_t page_size; /* The page size in bytes. Must be multiple of `UVISOR_PAGE_SIZE`! */

View File

@ -41,8 +41,10 @@
#include "page_allocator_config.h"
/* Maps the page to the owning box handle. */
page_owner_t g_page_owner_table[UVISOR_PAGE_TABLE_MAX_COUNT];
/* Contains the page usage mapped by owner. */
uint32_t g_page_owner_map[UVISOR_MAX_BOXES][UVISOR_PAGE_MAP_COUNT];
/* Contains total page usage. */
uint32_t g_page_usage_map[UVISOR_PAGE_MAP_COUNT];
/* Contains the configured page size. */
uint32_t g_page_size;
/* Points to the beginning of the page heap. */
@ -119,12 +121,16 @@ void page_allocator_init(void * const heap_start, void * const heap_end, const u
g_page_count_total = ((uint32_t) heap_end - start) / g_page_size;
}
/* Clamp page count to table size. */
if (g_page_count_total > UVISOR_PAGE_TABLE_MAX_COUNT) {
g_page_count_total = UVISOR_PAGE_TABLE_MAX_COUNT;
if (g_page_count_total > UVISOR_PAGE_MAX_COUNT) {
DPRINTF("uvisor_page_init: Clamping available page count from %u to %u!\n", g_page_count_total, UVISOR_PAGE_MAX_COUNT);
/* Move the heap start address forward so that the last clamped page is located nearest to the heap end. */
g_page_heap_start += (g_page_count_total - UVISOR_PAGE_MAX_COUNT) * g_page_size;
/* Clamp the page count. */
g_page_count_total = UVISOR_PAGE_MAX_COUNT;
}
g_page_count_free = g_page_count_total;
/* Remember the end of the heap. */
g_page_heap_end = g_page_heap_start + g_page_count_free * g_page_size;
g_page_heap_end = g_page_heap_start + g_page_count_total * g_page_size;
DPRINTF("uvisor_page_init:\n.page_heap start 0x%08x\n.page_heap end 0x%08x\n.page_heap available %ukB split into %u pages of %ukB\n\n",
(unsigned int) g_page_heap_start,
@ -133,11 +139,9 @@ void page_allocator_init(void * const heap_start, void * const heap_end, const u
(unsigned int) g_page_count_total,
(unsigned int) (g_page_size / 1024));
uint32_t page = 0;
for (; page < UVISOR_PAGE_TABLE_MAX_COUNT; page++) {
g_page_owner_table[page] = UVISOR_PAGE_UNUSED;
page_allocator_reset_faults(page);
}
/* Force a reset of owner and usage page maps. */
memset(g_page_owner_map, 0, sizeof(g_page_owner_map));
memset(g_page_usage_map, 0, sizeof(g_page_usage_map));
}
int page_allocator_malloc(UvisorPageTable * const table)
@ -176,10 +180,20 @@ int page_allocator_malloc(UvisorPageTable * const table)
/* Iterate through the page table and find the empty pages. */
uint32_t page = 0;
for (; (page < g_page_count_total) && pages_required; page++) {
/* If the page is unused, it's entry is UVISOR_PAGE_UNUSED (not NULL!). */
if (g_page_owner_table[page] == UVISOR_PAGE_UNUSED) {
/* Marry this page to the box id. */
g_page_owner_table[page] = box_id;
/* If the page is unused, map_get returns zero. */
if (!page_allocator_map_get(g_page_usage_map, page)) {
/* Remember this page as used. */
page_allocator_map_set(g_page_usage_map, page);
/* Pages of box 0 are accessible to all other boxes! */
if (box_id == 0) {
uint32_t ii = 0;
for (; ii < UVISOR_MAX_BOXES; ii++) {
page_allocator_map_set(g_page_owner_map[ii], page);
}
} else {
/* Otherwise, remember ownership only for active box. */
page_allocator_map_set(g_page_owner_map[box_id], page);
}
/* Reset the fault count for this page. */
page_allocator_reset_faults(page);
/* Get the pointer to the page. */
@ -243,14 +257,25 @@ int page_allocator_free(const UvisorPageTable * const table)
return UVISOR_ERROR_PAGE_INVALID_PAGE_ORIGIN;
}
/* Check if the page belongs to the caller. */
if (g_page_owner_table[page_index] == box_id) {
g_page_owner_table[page_index] = UVISOR_PAGE_UNUSED;
if (page_allocator_map_get(g_page_owner_map[box_id], page_index)) {
/* Clear the owner and usage page maps for this page. */
page_allocator_map_clear(g_page_usage_map, page_index);
/* If the page was owned by box 0, we need to remove it from all other boxes! */
if (box_id == 0) {
uint32_t ii = 0;
for (; ii < UVISOR_MAX_BOXES; ii++) {
page_allocator_map_clear(g_page_owner_map[ii], page_index);
}
} else {
/* Otherwise, only remove for the active box. */
page_allocator_map_clear(g_page_owner_map[box_id], page_index);
}
g_page_count_free++;
DPRINTF("uvisor_page_free: Freeing page at index %u\n", page_index);
}
else {
/* Abort if the page doesn't belong to the caller. */
if (g_page_owner_table[page_index] == UVISOR_PAGE_UNUSED) {
if (!page_allocator_map_get(g_page_usage_map, page_index)) {
DPRINTF("uvisor_page_free: FAIL: Page %u is not allocated!\n\n", page_index);
} else {
DPRINTF("uvisor_page_free: FAIL: Page %u is not owned by box %u!\n\n", page_index, box_id);

View File

@ -25,8 +25,8 @@
* a relatively low limit to the number of pages.
* By default a maximum of 16 pages are allowed. This can only be overwritten
* by the porting engineer for the current platform. */
#ifndef UVISOR_PAGE_TABLE_MAX_COUNT
#define UVISOR_PAGE_TABLE_MAX_COUNT ((uint32_t) 16)
#ifndef UVISOR_PAGE_MAX_COUNT
#define UVISOR_PAGE_MAX_COUNT (16UL)
#endif
/* The number of pages is decided by the page size. A small page size leads to
* a lot of pages, however, number of pages is capped for efficiency.
@ -34,12 +34,49 @@
* will lead to allocation failures. This can only be overwritten
* by the porting engineer for the current platform. */
#ifndef UVISOR_PAGE_SIZE_MINIMUM
#define UVISOR_PAGE_SIZE_MINIMUM ((uint32_t) 1024)
#define UVISOR_PAGE_SIZE_MINIMUM (1024UL)
#endif
/* Defines the number of uint32_t page owner masks in the owner map. */
#define UVISOR_PAGE_MAP_COUNT ((UVISOR_PAGE_MAX_COUNT + 31) / 32)
/* The page box_id is the box id which is 8-bit large. */
typedef uint8_t page_owner_t;
/* Define an unused value for the page table. */
#define UVISOR_PAGE_UNUSED ((page_owner_t) (-1))
/* Contains the total number of available pages. */
extern uint8_t g_page_count_total;
/** Marks a page as used by setting its bit in a page map array.
 * @param map an array of `uint32_t` holding one bit per page
 * @param page the index of the page whose bit is set
 */
static inline void page_allocator_map_set(uint32_t * const map, uint8_t page)
{
    /* Bias the index so that the highest page index lands on the last bit of
     * the map. NOTE(review): the sum is truncated to uint8_t like the
     * original `page +=`; assumes the bias plus index stays below 256 —
     * TODO confirm for platforms overriding UVISOR_PAGE_MAX_COUNT. */
    const uint8_t bit = (uint8_t) (page + (UVISOR_PAGE_MAP_COUNT * 32 - g_page_count_total));
    map[bit >> 5] |= (1UL << (bit & 31));
}
/** Marks a page as unused by clearing its bit in a page map array.
 * @param map an array of `uint32_t` holding one bit per page
 * @param page the index of the page whose bit is cleared
 */
static inline void page_allocator_map_clear(uint32_t * const map, uint8_t page)
{
    /* Bias the index so that the highest page index lands on the last bit of
     * the map (same truncating uint8_t arithmetic as the original). */
    const uint8_t bit = (uint8_t) (page + (UVISOR_PAGE_MAP_COUNT * 32 - g_page_count_total));
    map[bit >> 5] &= ~(1UL << (bit & 31));
}
/** Checks whether the page bit is set in the page map array.
 * @param map an array of `uint32_t` holding one bit per page
 * @param page the index of the page to query
 * @retval 0 if the page bit is not set
 * @retval 1 if the page bit is set
 */
static inline int page_allocator_map_get(const uint32_t * const map, uint8_t page)
{
    /* Bias the index so that the highest page index lands on the last bit of
     * the map (same truncating uint8_t arithmetic as the original). */
    const uint8_t bit = (uint8_t) (page + (UVISOR_PAGE_MAP_COUNT * 32 - g_page_count_total));
    return (int) ((map[bit >> 5] >> (bit & 31)) & 1UL);
}
#endif /* __PAGE_ALLOCATOR_CONFIG_H__ */

View File

@ -32,10 +32,6 @@
/* offsetof is a gcc built-in function, this is the manual implementation */
#define OFFSETOF(type, member) ((uint32_t) (&(((type *)(0))->member)))
/* Declare this variable here, so the tier-2 allocator _always_ uses the
* page size that the tier-1 allocator expects! */
const uint32_t __uvisor_page_size = UVISOR_PAGE_SIZE;
/* Internal structure currently only contains the page table. */
typedef struct {
UvisorPageTable table;
@ -71,10 +67,11 @@ SecureAllocator secure_allocator_create_with_pages(
size_t size,
size_t maximum_malloc_size)
{
const uint32_t page_size = uvisor_get_page_size();
/* The rt_Memory allocator puts one MEMP structure at both the
* beginning and end of the memory pool. */
const size_t block_overhead = 2 * sizeof(MEMP);
const size_t page_size_with_overhead = UVISOR_PAGE_SIZE + block_overhead;
const size_t page_size_with_overhead = page_size + block_overhead;
/* Calculate the integer part of required the page count. */
size_t page_count = size / page_size_with_overhead;
/* Add another page if the remainder is not zero. */
@ -84,7 +81,7 @@ SecureAllocator secure_allocator_create_with_pages(
DPRINTF("secure_allocator_create_with_pages: Requesting %u pages for at least %uB\n", page_count, size);
/* Compute the maximum allocation within our blocks. */
size_t maximum_allocation_size = UVISOR_PAGE_SIZE - block_overhead;
size_t maximum_allocation_size = page_size - block_overhead;
/* If the required maximum allocation is larger than we can provide, abort. */
if (maximum_malloc_size > maximum_allocation_size) {
DPRINTF("secure_allocator_create_with_pages: Maximum allocation request %uB is larger then available %uB\n\n", maximum_malloc_size, maximum_allocation_size);
@ -104,7 +101,7 @@ SecureAllocator secure_allocator_create_with_pages(
}
/* Prepare the page table. */
allocator->table.page_size = UVISOR_PAGE_SIZE;
allocator->table.page_size = page_size;
allocator->table.page_count = page_count;
/* Get me some pages. */
if (uvisor_page_malloc((UvisorPageTable *) &(allocator->table))) {
@ -116,7 +113,7 @@ SecureAllocator secure_allocator_create_with_pages(
/* Initialize a MEMP structure in all pages. */
for(size_t ii = 0; ii < page_count; ii++) {
/* Add each page as a pool. */
rt_init_mem(allocator->table.page_origins[ii], UVISOR_PAGE_SIZE);
rt_init_mem(allocator->table.page_origins[ii], page_size);
DPRINTF("secure_allocator_create_with_pages: Created MEMP allocator %p with offset %d\n", allocator->table.page_origins[ii], 0);
}
DPRINTF("\n");