From 2420c5818c4122a25fe1449a51b2d56945362d8b Mon Sep 17 00:00:00 2001 From: Sergiu Moga Date: Sat, 28 Oct 2023 18:57:12 +0300 Subject: [PATCH 01/18] uk/plat/memory: Introduce `pg_off` and `pg_count` memregion fields To make memory region management easier w.r.t. alignment handling, define two additional fields for `struct ukplat_memregion_desc`: - `pg_off` to represent the in-page offset from where the actual resource this memory region is dedicated to starts - `pg_count` to represent the length of the entire, end-to-end page-aligned, memory region in number of pages Thus, the definition of some other fields shall then change: - `pbase` will be the physical page-aligned base address of the region. This means that in order to get the actual address of a resource, one may have to make the following basic addition: `pbase` + `pg_off` - `vbase` same as `pbase` but for virtual base address - `len` will now represent the length of the resource inside the region, not the length of the region. E.g. For a resource with address `0x1050` and length `0x430` the corresponding memory region descriptor will have the following values: - `pbase` and `vbase` equal to `0x1000` (`PAGE_ALIGN_DOWN(0x1050)`) - `pg_off` equal to `0x50` (`0x1050 & ~PAGE_MASK`) - `pg_count` equal to `1` (`PAGE_COUNT(0x50 + 0x430)`) - `len` equal to `0x430` The other fields (`type`, `flags`, `name`) will keep their meaning. Now with the new structure, make sure that every call site to `ukplat_memregion_list_insert` also initializes `pg_off` and `pg_count` accordingly. Most importantly, deprecate the manual alignment and restoration of memory regions during coalescing, as it is no longer needed. The newly introduced fields guarantee that `pbase` and `pg_count` combined will always yield end-to-end aligned memory regions. 
In the case of printing, make memory map printing functionality show two sets of address ranges for each memory region descriptor: one for the page-aligned start and end of the memory region and one for the real, potentially misaligned, start and end addresses of the memory region descriptor (the actual start/end addresses of the resource the region is meant to map). Signed-off-by: Sergiu Moga Approved-by: Michalis Pappas Reviewed-by: Michalis Pappas Reviewed-by: Razvan Virtan GitHub-Closes: #1212 --- include/uk/arch/paging.h | 11 + include/uk/plat/memory.h | 10 +- lib/vfscore/automount.c | 21 +- plat/common/bootinfo.c | 20 +- plat/common/bootinfo_fdt.c | 47 +-- plat/common/include/uk/plat/common/bootinfo.h | 16 +- plat/common/include/uk/plat/common/memory.h | 60 ++-- plat/common/memory.c | 303 +++++++++--------- plat/common/paging.c | 41 ++- plat/kvm/arm/firecracker_bpt64.S | 2 + plat/kvm/arm/qemu_bpt64.S | 2 + plat/kvm/efi.c | 12 + plat/kvm/x86/lxboot.c | 32 +- plat/kvm/x86/multiboot.c | 32 +- plat/kvm/x86/pagetable64.S | 2 + support/scripts/mkbootinfo.py | 18 +- 16 files changed, 354 insertions(+), 275 deletions(-) diff --git a/include/uk/arch/paging.h b/include/uk/arch/paging.h index 49dab19e..66caaa30 100644 --- a/include/uk/arch/paging.h +++ b/include/uk/arch/paging.h @@ -364,6 +364,17 @@ int PAGE_Lx_IS(__pte_t pte, unsigned int lvl); #define _PT_PAGES(lvls, pages) __PT_PAGES(lvls, pages) #define PT_PAGES(pages) _PT_PAGES(PT_LEVELS, pages) +/** PAGE_COUNT(len) macro + * + * Computes the total number of pages required to map an area of a given + * length. + * + * @param len length of the area to map + * + * @return number of pages required to map an area of a given length + */ +#define PAGE_COUNT(len) DIV_ROUND_UP((len), PAGE_SIZE) + /** * Tests if a certain range of virtual addresses is valid on the current * architecture. 
For example, most 64-bit architectures do not fully implement diff --git a/include/uk/plat/memory.h b/include/uk/plat/memory.h index 4cefec8a..c3be4a8e 100644 --- a/include/uk/plat/memory.h +++ b/include/uk/plat/memory.h @@ -74,12 +74,16 @@ extern "C" { * Descriptor of a memory region */ struct ukplat_memregion_desc { - /** Physical base address */ + /** Physical page-aligned base address of the region */ __paddr_t pbase; - /** Virtual base address */ + /** Virtual page-aligned base address of the region */ __vaddr_t vbase; - /** Length in bytes */ + /** Offset where the resource starts in the region's first page */ + __off pg_off; + /** Length in bytes of the resource inside this region */ __sz len; + /** Number of pages the end-to-end aligned region occupies */ + __sz pg_count; /** Memory region type (see UKPLAT_MEMRT_*) */ __u16 type; /** Memory region flags (see UKPLAT_MEMRF_*) */ diff --git a/lib/vfscore/automount.c b/lib/vfscore/automount.c index 8cfb5ee4..045ca4d9 100644 --- a/lib/vfscore/automount.c +++ b/lib/vfscore/automount.c @@ -128,7 +128,26 @@ static int vfscore_mount_initrd_volume(struct vfscore_volume *vv) if (unlikely(rc < 0)) { uk_pr_crit("Could not find an initrd!\n"); - return -1; + rc = vfscore_get_initrd0(&initrd); + if (unlikely(rc < 0)) { + uk_pr_crit("Could not find an initrd!\n"); + return -1; + } + + vbase = (void *)initrd->vbase + initrd->pg_off; + vlen = initrd->len; + } +#if CONFIG_LIBVFSCORE_AUTOMOUNT_EINITRD + else if (!strcmp(vv->sdev, LIBVFSCORE_EXTRACT_DEV_EMBEDDED)) { + vbase = (const void *)vfscore_einitrd_start; + vlen = (size_t)((uintptr_t)&vfscore_einitrd_end - + (uintptr_t)vfscore_einitrd_start); + } +#endif /* CONFIG_LIBVFSCORE_AUTOMOUNT_EINITRD */ + else { + uk_pr_crit("\"%s\" is an invalid or unsupported initrd source!\n", + vv->sdev); + return -EINVAL; } return do_mount_initrd((void *)initrd->vbase, initrd->len, diff --git a/plat/common/bootinfo.c b/plat/common/bootinfo.c index d946a30e..a12a6839 100644 --- 
a/plat/common/bootinfo.c +++ b/plat/common/bootinfo.c @@ -98,18 +98,18 @@ void ukplat_bootinfo_print(void) break; } - uk_pr_info(" %012lx-%012lx %012lx %c%c%c %016lx %s %s\n", - mrd->pbase, mrd->pbase + mrd->len, mrd->len, - (mrd->flags & UKPLAT_MEMRF_READ) ? 'r' : '-', - (mrd->flags & UKPLAT_MEMRF_WRITE) ? 'w' : '-', - (mrd->flags & UKPLAT_MEMRF_EXECUTE) ? 'x' : '-', - mrd->vbase, - type, + uk_pr_info( + " %012lx-%012lx %012lx-%012lx %c%c%c %016lx %s %s\n", mrd->pbase, + mrd->pbase + mrd->pg_count * PAGE_SIZE, mrd->pbase + mrd->pg_off, + mrd->pbase + mrd->pg_off + mrd->len, + (mrd->flags & UKPLAT_MEMRF_READ) ? 'r' : '-', + (mrd->flags & UKPLAT_MEMRF_WRITE) ? 'w' : '-', + (mrd->flags & UKPLAT_MEMRF_EXECUTE) ? 'x' : '-', mrd->vbase, type, #ifdef CONFIG_UKPLAT_MEMRNAME - mrd->name + mrd->name #else /* CONFIG_UKPLAT_MEMRNAME */ - "" + "" #endif /* !CONFIG_UKPLAT_MEMRNAME */ - ); + ); } } diff --git a/plat/common/bootinfo_fdt.c b/plat/common/bootinfo_fdt.c index ee734c2b..7a8efcf0 100644 --- a/plat/common/bootinfo_fdt.c +++ b/plat/common/bootinfo_fdt.c @@ -52,10 +52,13 @@ static void fdt_bootinfo_mem_mrd(struct ukplat_bootinfo *bi, void *fdtp) mrd.len = mem_sz; if (!mrd.len) continue; - mrd.vbase = (__vaddr_t)mem_base; - mrd.pbase = (__paddr_t)mem_base; + + mrd.pbase = (__paddr_t)PAGE_ALIGN_DOWN(mem_base); + mrd.vbase = (__vaddr_t)PAGE_ALIGN_DOWN(mem_base); + mrd.pg_off = mem_base - mrd.pbase; mrd.type = UKPLAT_MEMRT_FREE; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE; + mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); if (unlikely(rc < 0)) @@ -66,18 +69,17 @@ static void fdt_bootinfo_mem_mrd(struct ukplat_bootinfo *bi, void *fdtp) } - /* - * Check that we are not placed at the top of - * the memory region - */ - mrd.len = __BASE_ADDR - mem_base; + /* Check that we are not placed at the top of the memory region */ + mrd.len = __BASE_ADDR - mem_base; if (!mrd.len) goto end_mrd; - mrd.vbase = (__vaddr_t)mem_base; - 
mrd.pbase = (__paddr_t)mem_base; + mrd.pbase = (__paddr_t)PAGE_ALIGN_DOWN(mem_base); + mrd.vbase = (__vaddr_t)PAGE_ALIGN_DOWN(mem_base); + mrd.pg_off = mem_base - mrd.pbase; mrd.type = UKPLAT_MEMRT_FREE; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE; + mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); if (unlikely(rc < 0)) @@ -93,10 +95,12 @@ end_mrd: if (!mrd.len) return; - mrd.vbase = (__vaddr_t)__END; - mrd.pbase = (__paddr_t)__END; + mrd.pbase = (__paddr_t)PAGE_ALIGN_DOWN(__END); + mrd.vbase = (__vaddr_t)PAGE_ALIGN_DOWN(__END); + mrd.pg_off = __END - mrd.pbase; mrd.type = UKPLAT_MEMRT_FREE; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE; + mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); if (unlikely(rc < 0)) @@ -154,6 +158,7 @@ static void fdt_bootinfo_initrd_mrd(struct ukplat_bootinfo *bi, void *fdtp) const __u64 *fdt_initrd_start; const __u64 *fdt_initrd_end; int start_len, end_len; + __u64 initrd_base; int nchosen; int rc; @@ -171,10 +176,12 @@ static void fdt_bootinfo_initrd_mrd(struct ukplat_bootinfo *bi, void *fdtp) if (unlikely(!fdt_initrd_end || end_len <= 0)) return; - mrd.vbase = initrd_addr(fdt_initrd_start[0], start_len); - mrd.pbase = initrd_addr(fdt_initrd_start[0], start_len); - mrd.len = initrd_addr(fdt_initrd_end[0], end_len) - - initrd_addr(fdt_initrd_start[0], start_len); + initrd_base = initrd_addr(fdt_initrd_start[0], start_len); + mrd.pbase = PAGE_ALIGN_DOWN(initrd_base); + mrd.vbase = mrd.pbase; + mrd.pg_off = initrd_base - mrd.pbase; + mrd.len = initrd_addr(fdt_initrd_end[0], end_len) - initrd_base; + mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); mrd.type = UKPLAT_MEMRT_INITRD; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; @@ -188,10 +195,12 @@ static void fdt_bootinfo_fdt_mrd(struct ukplat_bootinfo *bi, void *fdtp) struct ukplat_memregion_desc mrd = {0}; int rc; - mrd.vbase = (__vaddr_t)fdtp; - mrd.pbase = 
(__paddr_t)fdtp; - mrd.len = fdt_totalsize(fdtp); - mrd.type = UKPLAT_MEMRT_DEVICETREE; + mrd.pbase = (__paddr_t)PAGE_ALIGN_DOWN((__uptr)fdtp); + mrd.vbase = (__vaddr_t)PAGE_ALIGN_DOWN((__uptr)fdtp); + mrd.pg_off = (__u64)fdtp - mrd.pbase; + mrd.len = fdt_totalsize(fdtp); + mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); + mrd.type = UKPLAT_MEMRT_DEVICETREE; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); diff --git a/plat/common/include/uk/plat/common/bootinfo.h b/plat/common/include/uk/plat/common/bootinfo.h index 431f48a8..f88fafc7 100644 --- a/plat/common/include/uk/plat/common/bootinfo.h +++ b/plat/common/include/uk/plat/common/bootinfo.h @@ -52,17 +52,17 @@ struct ukplat_bootinfo { UK_CTASSERT(sizeof(struct ukplat_bootinfo) == 80); -#ifdef CONFIG_UKPLAT_MEMRNAME +#if CONFIG_UKPLAT_MEMRNAME #if __SIZEOF_LONG__ == 8 -UK_CTASSERT(sizeof(struct ukplat_memregion_desc) == 64); -#else /* __SIZEOF_LONG__ == 8 */ -UK_CTASSERT(sizeof(struct ukplat_memregion_desc) == 52); +UK_CTASSERT(sizeof(struct ukplat_memregion_desc) == 80); +#else /* __SIZEOF_LONG__ != 8 */ +UK_CTASSERT(sizeof(struct ukplat_memregion_desc) == 60); #endif /* __SIZEOF_LONG__ != 8 */ -#else /* CONFIG_UKPLAT_MEMRNAME */ +#else /* !CONFIG_UKPLAT_MEMRNAME */ #if __SIZEOF_LONG__ == 8 -UK_CTASSERT(sizeof(struct ukplat_memregion_desc) == 32); -#else /* __SIZEOF_LONG__ == 8 */ -UK_CTASSERT(sizeof(struct ukplat_memregion_desc) == 16); +UK_CTASSERT(sizeof(struct ukplat_memregion_desc) == 48); +#else /* __SIZEOF_LONG__ != 8 */ +UK_CTASSERT(sizeof(struct ukplat_memregion_desc) == 24); #endif /* __SIZEOF_LONG__ != 8 */ #endif /* !CONFIG_UKPLAT_MEMRNAME */ diff --git a/plat/common/include/uk/plat/common/memory.h b/plat/common/include/uk/plat/common/memory.h index f9da5d35..bca77ee3 100644 --- a/plat/common/include/uk/plat/common/memory.h +++ b/plat/common/include/uk/plat/common/memory.h @@ -187,31 +187,33 @@ ukplat_memregion_list_insert_legacy_hi_mem(struct 
ukplat_memregion_list *list) /* Note that we are mapping it as writable as well to cope with the * potential existence of the VGA framebuffer/SMM shadow memory. */ - rc = ukplat_memregion_list_insert(list, - &(struct ukplat_memregion_desc){ - .vbase = X86_HI_MEM_START, - .pbase = X86_HI_MEM_START, - .len = X86_HI_MEM_LEN, - .type = UKPLAT_MEMRT_RESERVED, - .flags = UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE | - UKPLAT_MEMRF_MAP, - }); + rc = ukplat_memregion_list_insert( + list, + &(struct ukplat_memregion_desc){ + .pbase = X86_HI_MEM_START, + .vbase = X86_HI_MEM_START, + .pg_off = 0, + .len = X86_HI_MEM_LEN, + .pg_count = PAGE_COUNT(X86_HI_MEM_LEN), + .type = UKPLAT_MEMRT_RESERVED, + .flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE | UKPLAT_MEMRF_MAP, + }); if (unlikely(rc < 0)) return rc; /* Keep compatibility with other possible reports of reserved memory * regions of this area and mark the BIOS System Memory as read-only. */ - rc = ukplat_memregion_list_insert(list, - &(struct ukplat_memregion_desc){ - .vbase = X86_BIOS_ROM_START, - .pbase = X86_BIOS_ROM_START, - .len = X86_BIOS_ROM_LEN, - .type = UKPLAT_MEMRT_RESERVED, - .flags = UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_MAP, - }); + rc = ukplat_memregion_list_insert( + list, &(struct ukplat_memregion_desc){ + .pbase = X86_BIOS_ROM_START, + .vbase = X86_BIOS_ROM_START, + .pg_off = 0, + .len = X86_BIOS_ROM_LEN, + .pg_count = PAGE_COUNT(X86_BIOS_ROM_LEN), + .type = UKPLAT_MEMRT_RESERVED, + .flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP, + }); if (unlikely(rc < 0)) return rc; @@ -414,19 +416,19 @@ ukplat_memregion_print_desc(struct ukplat_memregion_desc *mrd) break; } - uk_pr_debug(" %012lx-%012lx %012lx %c%c%c %016lx %s %s\n", - mrd->pbase, mrd->pbase + mrd->len, mrd->len, - (mrd->flags & UKPLAT_MEMRF_READ) ? 'r' : '-', - (mrd->flags & UKPLAT_MEMRF_WRITE) ? 'w' : '-', - (mrd->flags & UKPLAT_MEMRF_EXECUTE) ? 
'x' : '-', - mrd->vbase, - type, + uk_pr_debug(" %012lx-%012lx %012lx-%012lx %c%c%c %016lx %s %s\n", + mrd->pbase, mrd->pbase + mrd->pg_count * PAGE_SIZE, + mrd->pbase + mrd->pg_off, mrd->pbase + mrd->pg_off + mrd->len, + (mrd->flags & UKPLAT_MEMRF_READ) ? 'r' : '-', + (mrd->flags & UKPLAT_MEMRF_WRITE) ? 'w' : '-', + (mrd->flags & UKPLAT_MEMRF_EXECUTE) ? 'x' : '-', mrd->vbase, + type, #if CONFIG_UKPLAT_MEMRNAME - mrd->name + mrd->name #else /* !CONFIG_UKPLAT_MEMRNAME */ - "" + "" #endif /* !CONFIG_UKPLAT_MEMRNAME */ - ); + ); } #else /* !CONFIG_LIBUKDEBUG_PRINTD */ static inline void diff --git a/plat/common/memory.c b/plat/common/memory.c index cf301afa..9229ad2d 100644 --- a/plat/common/memory.c +++ b/plat/common/memory.c @@ -106,7 +106,9 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) if (olen - (pstart - ostart) == size) { mrd->pbase = pstart; mrd->vbase = pstart; + mrd->pg_off = 0; mrd->len = desired_sz; + mrd->pg_count = PAGE_COUNT(desired_sz); mrd->type = type; mrd->flags = flags; @@ -114,16 +116,19 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) } /* Adjust free region */ - mrd->len -= pend - mrd->pbase; + mrd->len -= pend - mrd->pbase; + mrd->pg_count = PAGE_COUNT(mrd->len); mrd->pbase = pend; mrd->vbase = (__vaddr_t)mrd->pbase; /* Insert allocated region */ - alloc_mrd.vbase = pstart; alloc_mrd.pbase = pstart; - alloc_mrd.len = desired_sz; - alloc_mrd.type = type; + alloc_mrd.vbase = pstart; + alloc_mrd.pg_off = 0; + alloc_mrd.len = desired_sz; + alloc_mrd.pg_count = PAGE_COUNT(desired_sz); + alloc_mrd.type = type; alloc_mrd.flags = flags | UKPLAT_MEMRF_MAP; bi = ukplat_bootinfo_get(); @@ -192,35 +197,52 @@ static inline void overlapping_mrd_fixup(struct ukplat_memregion_list *list, /* If the right region is contained within the left region, * drop it entirely */ - if (RANGE_CONTAIN(ml->pbase, ml->len, mr->pbase, mr->len)) { + /* This can only happen if mr is a free mrd or if it has + * the same type as ml + */ + 
UK_ASSERT(mr->type == UKPLAT_MEMRT_FREE || mr->type == ml->type); + + if (RANGE_CONTAIN(ml->pbase, ml->pg_count * PAGE_SIZE, mr->pbase, + mr->pg_count * PAGE_SIZE)) { mr->len = 0; + mr->pg_count = 0; - /* If the right region has a part of itself in the left region, - * drop that part of the right region only - */ + /* If the right region has a part of itself in the left region, + * drop that part of the right region only + */ } else { mr->len -= ml->pbase + ml->len - mr->pbase; - mr->pbase = ml->pbase + ml->len; + mr->pg_count = PAGE_COUNT(mr->len); + mr->pbase = ml->pbase + ml->pg_count * PAGE_SIZE; mr->vbase = mr->pbase; } /* If left memory region is of lower priority */ } else { + /* This can only happen if ml is a free mrd or if it has the + * same type as mr + */ + UK_ASSERT(ml->type == UKPLAT_MEMRT_FREE || ml->type == mr->type); + /* If the left memory region is contained within the right * region, drop it entirely */ - if (RANGE_CONTAIN(mr->pbase, mr->len, ml->pbase, ml->len)) { + if (RANGE_CONTAIN(mr->pbase, mr->pg_count * PAGE_SIZE, ml->pbase, + ml->pg_count * PAGE_SIZE)) { ml->len = 0; + ml->pg_count = 0; - /* If the left region has a part of itself in the right region, - * drop that part of the left region only and split by creating - * a new one if the left region is larger than the right region. - */ + /* If the left region has a part of itself in the right region, + * drop that part of the left region only and split by creating + * a new one if the left region is larger than the right region. + */ } else { - __sz len = ml->pbase + ml->len - mr->pbase - mr->len; + __sz len = ml->pbase + ml->pg_count * PAGE_SIZE - mr->pbase - + mr->pg_count * PAGE_SIZE; + __uptr base = PAGE_ALIGN_UP(mr->pbase + mr->len); - if (RANGE_CONTAIN(ml->pbase, ml->len, - mr->pbase, mr->len && len)) + if (RANGE_CONTAIN(ml->pbase, ml->pg_count * PAGE_SIZE, mr->pbase, + mr->pg_count * PAGE_SIZE)) /* len here is basically ml_end - mr_end. 
Thus, * len == 0 can happen only if mr is at the end * of the ml and we therefore ignore the rest. @@ -228,53 +250,25 @@ static inline void overlapping_mrd_fixup(struct ukplat_memregion_list *list, * somewhere in the middle of ml and we are * inserting the mrd between mr_end and ml_end */ - ukplat_memregion_list_insert_at_idx(list, - &(struct ukplat_memregion_desc){ - .vbase = mr->pbase + mr->len, - .pbase = mr->pbase + mr->len, - .len = len, - .type = ml->type, - .flags = ml->flags - }, ridx + 1); + ukplat_memregion_list_insert_at_idx( + list, + &(struct ukplat_memregion_desc){ .pbase = base, + .vbase = base, + .pg_off = 0, + .len = len, + .pg_count = + PAGE_COUNT(len), + .type = UKPLAT_MEMRT_FREE, + .flags = ml->flags }, + ridx + 1); /* Drop the fraction of ml that overlaps with mr */ - ml->len = mr->pbase - ml->pbase; + ml->len = (mr->pbase + mr->pg_off) - (ml->pbase + ml->pg_off); + ml->pg_count = PAGE_COUNT(ml->pg_off + ml->len); } } } -/* During coalescing of two memory region descriptors, we first call this - * function which would overwrite the physical and length of a given memory - * region descriptor with its equivalent page-aligned physical base and length - * if the end address of the memory region would have also been page-aligned. - * The coalesce function then, at the end, calls ukplat_memregion_restore_mrd - * to undo this. - */ -static void ukplat_memregion_align_mrd(struct ukplat_memregion_desc *mrd, - __paddr_t *opbase, __sz *olen) -{ - __paddr_t pend; - - /* Store the **original** physical base and length */ - *opbase = mrd->pbase; - *olen = mrd->len; - - if (mrd->type == UKPLAT_MEMRT_FREE) - align_free_memregion(mrd, &mrd->pbase, &mrd->len, &pend); - else - align_mapped_memregion(mrd, &mrd->pbase, &mrd->len, &pend); -} - -/* Called at the end of ukplat_memregion_list_coalesce to undo what - * ukplat_memregion_align_mrd has done. 
- */ -static void ukplat_memregion_restore_mrd(struct ukplat_memregion_desc *mrd, - __paddr_t opbase, __sz olen) -{ - mrd->len = olen; - mrd->pbase = opbase; -} - /* Quick function to do potentially necessary swapping of two adjacent memory * region descriptors. Here just to modularize code because * ukplat_memregion_list_coalesce was quite large already. @@ -299,10 +293,7 @@ static void ukplat_memregion_swap_if_unordered(struct ukplat_memregion_list *l, void ukplat_memregion_list_coalesce(struct ukplat_memregion_list *list) { struct ukplat_memregion_desc *m, *ml, *mr; - __paddr_t ml_opbase, mr_opbase; - __sz ml_olen, mr_olen; - int ml_prio, mr_prio; - __u8 del; /* lets us know if a deletion happened */ + int ml_prio, mr_prio; __u32 i; UK_ASSERT(list); @@ -312,8 +303,6 @@ void ukplat_memregion_list_coalesce(struct ukplat_memregion_list *list) i = 0; m = list->mrds; while (i + 1 < list->count) { - del = 0; - /* Make sure first that they are ordered. If not, swap them */ ukplat_memregion_swap_if_unordered(list, i, i + 1); @@ -335,14 +324,8 @@ void ukplat_memregion_list_coalesce(struct ukplat_memregion_list *list) uk_pr_debug("Priority of right memory region: %d\n", mr_prio); UK_ASSERT(mr_prio >= 0); - /* Store original pbase */ - ml_opbase = ml->pbase; - mr_opbase = mr->pbase; - - ukplat_memregion_align_mrd(ml, &ml_opbase, &ml_olen); - ukplat_memregion_align_mrd(mr, &mr_opbase, &mr_olen); - - if (RANGE_OVERLAP(ml->pbase, ml->len, mr->pbase, mr->len)) { + if (RANGE_OVERLAP(ml->pbase, ml->pg_count * PAGE_SIZE, mr->pbase, + mr->pg_count * PAGE_SIZE)) { /* If they are not of the same priority */ if (ml_prio != mr_prio) { uk_pr_debug("mrd's of different priority " @@ -359,7 +342,6 @@ void ukplat_memregion_list_coalesce(struct ukplat_memregion_list *list) mr_prio, i, i + 1); /* Remove dropped regions */ - del = 1; if (ml->len == 0) { uk_pr_debug("Deleting left mrd!\n"); ukplat_memregion_list_delete(list, i); @@ -370,44 +352,56 @@ void ukplat_memregion_list_coalesce(struct 
ukplat_memregion_list *list) i + 1); } else { i++; - del = 0; /* No deletions */ } - /* If they have the same priority, merge them. If they - * are contained within each other, drop the contained - * one. - */ + /* If they have the same priority, merge them. If they + * are contained within each other, drop the contained + * one. Do not allow merging of kernel resources, as + * their resource page offset into the region is + * important! + */ } else { + /* Kernel regions must never overlap! */ + UK_ASSERT(ml_prio != MRD_PRIO_KRNL_RSRC); + UK_ASSERT(mr_prio != MRD_PRIO_KRNL_RSRC); + /* We do not allow overlaps of same priority * and of different flags. */ UK_ASSERT(ml->flags == mr->flags); + /* We do not allow overlaps of memory regions + * whose resource page offset into their region + * is not equal to 0. Regions that don't meet + * this condition are hand-inserted by us and + * should not overlap. + */ + UK_ASSERT(!ml->pg_off); + UK_ASSERT(!mr->pg_off); + UK_ASSERT(PAGE_ALIGNED(ml->pbase)); + UK_ASSERT(PAGE_ALIGNED(mr->pbase)); + /* If the left region is contained within the * right region, drop it */ - if (RANGE_CONTAIN(mr->pbase, mr->len, - ml->pbase, ml->len)) { + if (RANGE_CONTAIN(mr->pbase, mr->pg_count * PAGE_SIZE, + ml->pbase, ml->pg_count * PAGE_SIZE)) { uk_pr_debug("Deleting left mrd!\n"); ukplat_memregion_list_delete(list, i); - del = 1; - - goto restore_mrds; + continue; - /* If the right region is contained within the - * left region, drop it - */ - } else if (RANGE_CONTAIN(ml->pbase, ml->len, - mr->pbase, mr->len)) { + /* If the right region is contained within the + * left region, drop it + */ + } else if (RANGE_CONTAIN(ml->pbase, ml->pg_count * PAGE_SIZE, + mr->pbase, mr->pg_count * PAGE_SIZE)) { uk_pr_debug("Deleting right mrd!\n"); ukplat_memregion_list_delete(list, i + 1); - del = 1; - - goto restore_mrds; + continue; } - uk_pr_debug("Merging two overlapping mrd's.\n"); + uk_pr_debug("Merging two overlapping mrds.\n"); /* If they are not 
contained within each other, * merge them. @@ -418,60 +412,40 @@ void ukplat_memregion_list_coalesce(struct ukplat_memregion_list *list) * overlapping region */ ml->len -= ml->pbase + ml->len - mr->pbase; + ml->pg_count = PAGE_COUNT(ml->pg_off + ml->len); /* Delete the memory region we just merged into * the previous region. */ ukplat_memregion_list_delete(list, i + 1); - del = 1; } - /* If they do not overlap but they are contiguous and have the - * same flags and priority. - */ - } else if (ml->pbase + ml->len == mr->pbase && - ml_prio == mr_prio && ml->flags == mr->flags) { + /* If they do not overlap but they are contiguous and have the + * same flags and priority. Do not merge Kernel type memregions, + * as we have to preserve pg_off's and len's. + */ + } else if (ml->pbase + ml->len == mr->pbase && ml_prio == mr_prio && + ml->flags == mr->flags && ml_prio != MRD_PRIO_KRNL_RSRC) { + /* We do not allow overlaps of memory regions + * whose resource page offset into their region + * is not equal to 0. Regions that don't meet + * this condition are hand-inserted by us and + * should not overlap. + */ + UK_ASSERT(!ml->pg_off); + UK_ASSERT(!mr->pg_off); + UK_ASSERT(PAGE_ALIGNED(ml->pbase)); + UK_ASSERT(PAGE_ALIGNED(mr->pbase)); + uk_pr_debug("Merging two contiguous mrd's.\n"); ml->len += mr->len; + ml->pg_count = PAGE_COUNT(ml->len); ukplat_memregion_list_delete(list, i + 1); - del = 1; } else { uk_pr_debug("No adjustment for these mrd's.\n"); i++; } - -restore_mrds: - if (!del) { - /* We assume only MRD_PRIO_FREE can be dropped. We want - * to maintain !MRD_PRIO_FREE start addresses and - * length so that the kernel may use them (e.g. initrd - * start address). - */ - if (ml_prio != MRD_PRIO_FREE) - ukplat_memregion_restore_mrd(ml, ml_opbase, - ml_olen); - - /* This here can only happen if two adjacent - * !MRD_PRIO_FREE regions are resolved without a - * deletion. Preserve mr's original data as well. 
- */ - if (mr_prio != MRD_PRIO_FREE) - ukplat_memregion_restore_mrd(mr, mr_opbase, - mr_olen); - } - - /* Update ml's vbase, since it might not be equal to pbase - * anymore. Whether we deleted ml or mr it does not matter, - * as ml is now equal to the remaining one, because - * ukplat_memregion_list_delete() removes by `memmove()`ing. - */ - ml->vbase = ml->pbase; } - - /* Make sure the last memory region always ends up being updated when - * we exit this function - */ - m[i].vbase = m[i].pbase; } int ukplat_memregion_count(void) @@ -515,28 +489,33 @@ static int ukplat_memregion_list_insert_unmaps(struct ukplat_bootinfo *bi) &unmap_end); /* After Kernel image */ - rc = ukplat_memregion_list_insert(&bi->mrds, - &(struct ukplat_memregion_desc){ - .vbase = ALIGN_UP(__END, __PAGE_SIZE), - .pbase = 0, - .len = unmap_end - - ALIGN_UP(__END, __PAGE_SIZE), - .type = 0, - .flags = UKPLAT_MEMRF_UNMAP, - }); + rc = ukplat_memregion_list_insert( + &bi->mrds, + &(struct ukplat_memregion_desc){ + .pbase = 0, + .vbase = ALIGN_UP(__END, __PAGE_SIZE), + .pg_off = 0, + .len = unmap_end - ALIGN_UP(__END, __PAGE_SIZE), + .pg_count = PAGE_COUNT(unmap_end - ALIGN_UP(__END, __PAGE_SIZE)), + .type = 0, + .flags = UKPLAT_MEMRF_UNMAP, + }); if (unlikely(rc < 0)) return rc; /* Before Kernel image */ return ukplat_memregion_list_insert( - &bi->mrds, - &(struct ukplat_memregion_desc){ - .vbase = (__vaddr_t)unmap_start, - .pbase = 0, - .len = ALIGN_DOWN(__BASE_ADDR, __PAGE_SIZE) - unmap_start, - .type = 0, - .flags = UKPLAT_MEMRF_UNMAP, - }); + &bi->mrds, + &(struct ukplat_memregion_desc){ + .pbase = 0, + .vbase = unmap_start, + .pg_off = 0, + .len = ALIGN_DOWN(__BASE_ADDR, __PAGE_SIZE) - unmap_start, + .pg_count = + PAGE_COUNT(ALIGN_DOWN(__BASE_ADDR, __PAGE_SIZE) - unmap_start), + .type = 0, + .flags = UKPLAT_MEMRF_UNMAP, + }); } int ukplat_mem_init(void) @@ -578,16 +557,17 @@ int ukplat_mem_init(void) &unmap_end); for (i = (int)bi->mrds.count - 1; i >= 0; i--) { ukplat_memregion_get(i, 
&mrdp); - if (mrdp->vbase >= unmap_end - || mrdp->vbase + mrdp->len <= unmap_start) { + if (mrdp->vbase >= unmap_end || + mrdp->vbase + mrdp->len <= unmap_start) { /* Region is outside the unmapped area */ uk_pr_info("Memory %012lx-%012lx outside unmapped area\n", mrdp->vbase, mrdp->vbase + mrdp->len); if (mrdp->type == UKPLAT_MEMRT_FREE) ukplat_memregion_list_delete(&bi->mrds, i); - } else if (mrdp->vbase < unmap_start - && mrdp->vbase + mrdp->len <= unmap_end) { + continue; + } else if (mrdp->vbase < unmap_start && + mrdp->vbase + mrdp->len <= unmap_end) { /* Region overlaps with mapped area */ uk_pr_info("Memory %012lx-%012lx outside unmapped area\n", mrdp->vbase, unmap_start); @@ -596,17 +576,26 @@ int ukplat_mem_init(void) mrdp->len -= unmap_start - mrdp->vbase; mrdp->vbase = mrdp->pbase = unmap_start; } - } else if (mrdp->vbase >= unmap_start - && mrdp->vbase + mrdp->len > unmap_end) { + continue; + } else if (mrdp->vbase >= unmap_start && + mrdp->vbase + mrdp->len > unmap_end) { /* Region overlaps with mapped area */ uk_pr_info("Memory %012lx-%012lx outside unmapped area\n", - unmap_end, - mrdp->vbase + mrdp->len); + unmap_end, mrdp->vbase + mrdp->pg_count * PAGE_SIZE); - if (mrdp->type == UKPLAT_MEMRT_FREE) + if (mrdp->type == UKPLAT_MEMRT_FREE) { mrdp->len -= (mrdp->vbase + mrdp->len) - unmap_end; + mrdp->pg_count = PAGE_COUNT(mrdp->len); + } + + /* Since regions are non-overlapping and ordered, we + * can stop here, as the next region would be fully + * mapped anyways + */ } + /* Region is fully mapped */ + break; } return 0; diff --git a/plat/common/paging.c b/plat/common/paging.c index 4e8e5f26..12581233 100644 --- a/plat/common/paging.c +++ b/plat/common/paging.c @@ -1441,10 +1441,7 @@ extern struct ukplat_memregion_desc bpt_unmap_mrd; int ukplat_paging_init(void) { struct ukplat_memregion_desc *mrd; - unsigned long prot; - __vaddr_t vaddr; - __paddr_t paddr; - __sz len; + unsigned long prot; int rc; /* Initialize the frame allocator with the free 
physical memory @@ -1456,19 +1453,19 @@ int ukplat_paging_init(void) UK_ASSERT(mrd->vbase == mrd->pbase); UK_ASSERT(!(mrd->pbase & ~PAGE_MASK)); UK_ASSERT(mrd->len); - UK_ASSERT(!(mrd->len & ~PAGE_MASK)); /* Not mapped */ mrd->vbase = __U64_MAX; mrd->flags &= ~UKPLAT_MEMRF_PERMS; if (!kernel_pt.fa) { - rc = ukplat_pt_init(&kernel_pt, mrd->pbase, mrd->len); + rc = ukplat_pt_init(&kernel_pt, mrd->pbase, + mrd->pg_count * PAGE_SIZE); if (unlikely(rc)) kernel_pt.fa = NULL; } else { rc = ukplat_pt_add_mem(&kernel_pt, mrd->pbase, - mrd->len); + mrd->pg_count * PAGE_SIZE); } /* We do not fail if we cannot add this memory region to the @@ -1491,14 +1488,19 @@ int ukplat_paging_init(void) /* Perform unmappings */ ukplat_memregion_foreach(&mrd, 0, UKPLAT_MEMRF_UNMAP, UKPLAT_MEMRF_UNMAP) { + /* Ensure unmap memory region descriptors' correctness */ + /* Must be non-empty and aligned end-to-end */ + UK_ASSERT(mrd->len); + UK_ASSERT(mrd->pg_count * PAGE_SIZE == mrd->len); + UK_ASSERT(PAGE_ALIGNED(mrd->vbase)); + UK_ASSERT(!mrd->pg_off); + /* Physical base address must be 0 */ + UK_ASSERT(!mrd->pbase); + /* Virtual base address must be a valid value */ UK_ASSERT(mrd->vbase != __U64_MAX); - vaddr = PAGE_ALIGN_DOWN(mrd->vbase); - len = PAGE_ALIGN_UP(mrd->len + (mrd->vbase - vaddr)); - - rc = ukplat_page_unmap(&kernel_pt, vaddr, - len >> PAGE_SHIFT, - PAGE_FLAG_KEEP_FRAMES); + rc = ukplat_page_unmap(&kernel_pt, mrd->vbase, mrd->pg_count, + PAGE_FLAG_KEEP_FRAMES); if (unlikely(rc)) return rc; } @@ -1506,22 +1508,19 @@ int ukplat_paging_init(void) /* Perform mappings */ ukplat_memregion_foreach(&mrd, 0, UKPLAT_MEMRF_MAP, UKPLAT_MEMRF_MAP) { + UK_ASSERT(!(mrd->vbase & ~PAGE_MASK)); UK_ASSERT(mrd->vbase != __U64_MAX); - vaddr = PAGE_ALIGN_DOWN(mrd->vbase); - paddr = PAGE_ALIGN_DOWN(mrd->pbase); - len = PAGE_ALIGN_UP(mrd->len + (mrd->vbase - vaddr)); - #if defined(CONFIG_ARCH_ARM_64) - if (!RANGE_CONTAIN(bpt_unmap_mrd.pbase, bpt_unmap_mrd.len, - paddr, len)) + if 
(!RANGE_CONTAIN(bpt_unmap_mrd.pbase, bpt_unmap_mrd.len, mrd->pbase, + mrd->pg_count * PAGE_SIZE)) continue; #endif prot = bootinfo_to_page_attr(mrd->flags); - rc = ukplat_page_map(&kernel_pt, vaddr, paddr, - len >> PAGE_SHIFT, prot, 0); + rc = ukplat_page_map(&kernel_pt, mrd->vbase, mrd->pbase, mrd->pg_count, + prot, 0); if (unlikely(rc)) return rc; } diff --git a/plat/kvm/arm/firecracker_bpt64.S b/plat/kvm/arm/firecracker_bpt64.S index 31c796d8..e5def6ed 100644 --- a/plat/kvm/arm/firecracker_bpt64.S +++ b/plat/kvm/arm/firecracker_bpt64.S @@ -22,8 +22,10 @@ bpt_unmap_mrd: .quad 0x0000000080000000 /* 1 GiB */ .quad 0x0000000080000000 /* 1 GiB */ + .quad 0x0000000000000000 /* Page-aligned */ /* FIXME: Unmap to 1TiB */ .quad (255 - 1) * 0x0000000080000000 + .quad (255 - 1) * 0x0000000000080000 /* Page count */ .short 0x0000000000000000 .short 0x0000000000000010 /* UKPLAT_MEMRF_UNMAP */ .space 36 diff --git a/plat/kvm/arm/qemu_bpt64.S b/plat/kvm/arm/qemu_bpt64.S index 9ececa73..0a4f2ca0 100644 --- a/plat/kvm/arm/qemu_bpt64.S +++ b/plat/kvm/arm/qemu_bpt64.S @@ -24,6 +24,7 @@ bpt_unmap_mrd: .quad 0x0000000040000000 /* 1 GiB */ .quad 0x0000000040000000 /* 1 GiB */ + .quad 0x0000000000000000 /* Page-aligned */ /* QEMU-VIRT's legacy RAM max is 255 GiB, but it can also depend on the * settings, see QEMU upstream commit: * 50a17297e2f0c ("hw/arm/virt: Bump the 255GB initial RAM limit"). @@ -31,6 +32,7 @@ bpt_unmap_mrd: * high memory. 
*/ .quad (255 - 1) * 0x0000000040000000 + .quad (255 - 1) * 0x0000000000040000 /* Page count */ .short 0x0000000000000000 .short 0x0000000000000010 /* UKPLAT_MEMRF_UNMAP */ .space 36 diff --git a/plat/kvm/efi.c b/plat/kvm/efi.c index 0cd81bf7..29cb1df2 100644 --- a/plat/kvm/efi.c +++ b/plat/kvm/efi.c @@ -213,6 +213,10 @@ static int uk_efi_md_to_bi_mrd(struct uk_efi_mem_desc *const md, mrd->vbase = start; mrd->len = end - start; + /* All UEFI memory regions are page-aligned */ + mrd->pg_off = 0; + mrd->pg_count = md->number_of_pages; + return 0; } @@ -338,6 +342,8 @@ static void uk_efi_rt_md_to_bi_mrds(struct ukplat_memregion_desc **rt_mrds, rt_mrd = *rt_mrds + i; rt_mrd->pbase = mat_md->physical_start; rt_mrd->len = mat_md->number_of_pages * UK_EFI_PAGE_SIZE; + rt_mrd->pg_off = 0; + rt_mrd->pg_count = mat_md->number_of_pages; rt_mrd->vbase = rt_mrd->pbase; rt_mrd->type = UKPLAT_MEMRT_RESERVED; rt_mrd->flags = UKPLAT_MEMRF_MAP; @@ -547,7 +553,9 @@ static void uk_efi_setup_bootinfo_cmdl(struct ukplat_bootinfo *bi) mrd.pbase = (__paddr_t)cmdl; mrd.vbase = (__vaddr_t)cmdl; + mrd.pg_off = 0; mrd.len = len; + mrd.pg_count = PAGE_COUNT(len); mrd.type = UKPLAT_MEMRT_CMDLINE; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); @@ -577,7 +585,9 @@ static void uk_efi_setup_bootinfo_initrd(struct ukplat_bootinfo *bi) mrd.pbase = (__paddr_t)initrd; mrd.vbase = (__vaddr_t)initrd; + mrd.pg_off = 0; mrd.len = len; + mrd.pg_count = PAGE_COUNT(len); mrd.type = UKPLAT_MEMRT_INITRD; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); @@ -604,7 +614,9 @@ static void uk_efi_setup_bootinfo_dtb(struct ukplat_bootinfo *bi) mrd.pbase = (__paddr_t)dtb; mrd.vbase = (__vaddr_t)dtb; + mrd.pg_off = 0; mrd.len = len; + mrd.pg_count = PAGE_COUNT(len); mrd.type = UKPLAT_MEMRT_DEVICETREE; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); diff --git 
a/plat/kvm/x86/lxboot.c b/plat/kvm/x86/lxboot.c index d197d4c5..9d6a70dd 100644 --- a/plat/kvm/x86/lxboot.c +++ b/plat/kvm/x86/lxboot.c @@ -39,10 +39,12 @@ lxboot_init_cmdline(struct ukplat_bootinfo *bi, struct lxboot_params *bp) if (cmdline_size == 0) return; - mrd.pbase = cmdline_addr; - mrd.vbase = cmdline_addr; - mrd.len = cmdline_size; - mrd.type = UKPLAT_MEMRT_CMDLINE; + mrd.pbase = PAGE_ALIGN_DOWN(cmdline_addr); + mrd.vbase = mrd.pbase; + mrd.pg_off = cmdline_addr - mrd.pbase; + mrd.len = cmdline_size; + mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); + mrd.type = UKPLAT_MEMRT_CMDLINE; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; #ifdef CONFIG_UKPLAT_MEMRNAME memcpy(mrd.name, "cmdline", sizeof("cmdline")); @@ -75,11 +77,13 @@ lxboot_init_initrd(struct ukplat_bootinfo *bi, struct lxboot_params *bp) if (initrd_addr == 0 || initrd_size == 0) return; - mrd.type = UKPLAT_MEMRT_INITRD; - mrd.flags = UKPLAT_MEMRF_MAP | UKPLAT_MEMRF_READ; - mrd.vbase = initrd_addr; - mrd.pbase = initrd_addr; - mrd.len = initrd_size; + mrd.pbase = PAGE_ALIGN_DOWN(initrd_addr); + mrd.vbase = mrd.pbase; + mrd.pg_off = initrd_addr - mrd.pbase; + mrd.len = initrd_size; + mrd.type = UKPLAT_MEMRT_INITRD; + mrd.pg_count = PAGE_COUNT(mrd.pg_off + initrd_size); + mrd.flags = UKPLAT_MEMRF_MAP | UKPLAT_MEMRF_READ; #ifdef CONFIG_UKPLAT_MEMRNAME memcpy(mrd.name, "initrd", sizeof("initrd")); #endif /* CONFIG_UKPLAT_MEMRNAME */ @@ -110,14 +114,20 @@ lxboot_init_mem(struct ukplat_bootinfo *bi, struct lxboot_params *bp) if (end <= start) continue; - mrd.pbase = start; - mrd.vbase = start; /* 1:1 mapping */ + mrd.pbase = PAGE_ALIGN_DOWN(start); + mrd.vbase = mrd.pbase; /* 1:1 mapping */ + mrd.pg_off = start - mrd.pbase; mrd.len = end - start; + mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); if (entry->type == LXBOOT_E820_TYPE_RAM) { mrd.type = UKPLAT_MEMRT_FREE; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE; + /* Free memory regions have + * mrd.len == mrd.pg_count * PAGE_SIZE + */ + 
mrd.len = PAGE_ALIGN_UP(mrd.len); } else { mrd.type = UKPLAT_MEMRT_RESERVED; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; diff --git a/plat/kvm/x86/multiboot.c b/plat/kvm/x86/multiboot.c index 7a0c2799..1e164ff6 100644 --- a/plat/kvm/x86/multiboot.c +++ b/plat/kvm/x86/multiboot.c @@ -71,10 +71,13 @@ void multiboot_entry(struct lcpu *lcpu, struct multiboot_info *mi) if (mi->flags & MULTIBOOT_INFO_CMDLINE) { if (mi->cmdline) { cmdline_len = strlen((const char *)(__uptr)mi->cmdline); - mrd.pbase = mi->cmdline; - mrd.vbase = mi->cmdline; /* 1:1 mapping */ - mrd.len = cmdline_len; - mrd.type = UKPLAT_MEMRT_CMDLINE; + /* 1:1 mapping */ + mrd.pbase = PAGE_ALIGN_DOWN(mi->cmdline); + mrd.vbase = mrd.pbase; + mrd.pg_off = mi->cmdline - mrd.pbase; + mrd.len = cmdline_len; + mrd.pg_count = PAGE_COUNT(mrd.pg_off + cmdline_len); + mrd.type = UKPLAT_MEMRT_CMDLINE; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; mrd_insert(bi, &mrd); @@ -99,9 +102,11 @@ void multiboot_entry(struct lcpu *lcpu, struct multiboot_info *mi) if (mi->flags & MULTIBOOT_INFO_MODS) { mods = (multiboot_module_t *)(__uptr)mi->mods_addr; for (i = 0; i < mi->mods_count; i++) { - mrd.pbase = mods[i].mod_start; - mrd.vbase = mods[i].mod_start; /* 1:1 mapping */ - mrd.len = mods[i].mod_end - mods[i].mod_start; + mrd.pbase = PAGE_ALIGN_DOWN(mods[i].mod_start); + mrd.vbase = mrd.pbase; /* 1:1 mapping */ + mrd.pg_off = mods[i].mod_start - mrd.pbase; + mrd.len = mods[i].mod_end - mods[i].mod_start; + mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); mrd.type = UKPLAT_MEMRT_INITRD; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; @@ -134,14 +139,21 @@ void multiboot_entry(struct lcpu *lcpu, struct multiboot_info *mi) if (unlikely(end <= start || end - start < PAGE_SIZE)) continue; - mrd.pbase = start; - mrd.vbase = start; /* 1:1 mapping */ - mrd.len = end - start; + mrd.pbase = PAGE_ALIGN_DOWN(start); + mrd.vbase = mrd.pbase; /* 1:1 mapping */ + mrd.pg_off = start - mrd.pbase; + mrd.len = end - start; + 
mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); if (m->type == MULTIBOOT_MEMORY_AVAILABLE) { mrd.type = UKPLAT_MEMRT_FREE; mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE; + + /* Free memory regions have + * mrd.len == mrd.pg_count * PAGE_SIZE + */ + mrd.len = PAGE_ALIGN_UP(mrd.len + mrd.pg_off); } else { mrd.type = UKPLAT_MEMRT_RESERVED; mrd.flags = UKPLAT_MEMRF_READ | diff --git a/plat/kvm/x86/pagetable64.S b/plat/kvm/x86/pagetable64.S index 47983acc..81f24ffd 100644 --- a/plat/kvm/x86/pagetable64.S +++ b/plat/kvm/x86/pagetable64.S @@ -117,7 +117,9 @@ bpt_unmap_mrd: .quad 0x0000000000000000 /* 0 GiB */ .quad 0x0000000000000000 /* 0 GiB */ + .quad 0x0000000000000000 /* Page-aligned */ .quad 0x0000000100000000 /* 4 GiB */ + .quad 0x0000000000100000 /* Page count */ .short 0x0000000000000000 .short 0x0000000000000010 /* UKPLAT_MEMRF_UNMAP */ .fill 36, 1, 0 diff --git a/support/scripts/mkbootinfo.py b/support/scripts/mkbootinfo.py index be73a8db..aa4f023e 100755 --- a/support/scripts/mkbootinfo.py +++ b/support/scripts/mkbootinfo.py @@ -29,6 +29,7 @@ UKPLAT_BOOTINFO_MAGIC = 0xB007B0B0 # Boot Bobo UKPLAT_BOOTINFO_VERSION = 0x01 PAGE_SIZE = 4096 +PAGE_SHIFT = 12 def main(): parser = argparse.ArgumentParser( @@ -120,21 +121,26 @@ def main(): # We have 1:1 mapping vbase = pbase + # Offset in the first page is equal to the start of the first page + pg_off = 0 # Align size up to page size size = (int(phdr[1], base=16) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1) if size == 0: continue + pg_count = size >> PAGE_SHIFT assert nsecs < cap nsecs += 1 - secobj.write(pbase.to_bytes(8, endianness)) # pbase - secobj.write(vbase.to_bytes(8, endianness)) # vbase - secobj.write(size.to_bytes(8, endianness)) # len - secobj.write(UKPLAT_MEMRT_KERNEL.to_bytes(2, endianness)) # type - secobj.write(flags.to_bytes(2, endianness)) # flags - secobj.write(name) # name or padding + secobj.write(pbase.to_bytes(8, endianness)) # pbase + secobj.write(vbase.to_bytes(8, endianness)) # vbase + 
secobj.write(pg_off.to_bytes(8, endianness)) # pg_off + secobj.write(size.to_bytes(8, endianness)) # len + secobj.write(pg_count.to_bytes(8, endianness)) # pg_count + secobj.write(UKPLAT_MEMRT_KERNEL.to_bytes(2, endianness)) # type + secobj.write(flags.to_bytes(2, endianness)) # flags + secobj.write(name) # name or padding # Update the number of memory regions secobj.seek(UKPLAT_BOOTINFO_SIZE - 4, os.SEEK_SET) -- Gitee From a6ece9b40419618401ef153455e823bcd360d186 Mon Sep 17 00:00:00 2001 From: Sergiu Moga Date: Sun, 29 Oct 2023 10:27:34 +0200 Subject: [PATCH 02/18] uk/plat: Add `UK_ASSERT_VALID_MRD_*` to assert on invalid mrd types Implement a memory region type specific spin-off of `UK_ASSERT` that will throw assertions specific to the type of the memory region being asserted As a basis, MRDs must meet the following, generic, criteria: - must be of only one valid type as per `UK_ASSERT_VALID_MRD_TYPE` - must only have valid flags as per `UK_ASSERT_VALID_MRD_FLAGS` - memory region is not empty or of length 0 - virtual/physical base addresses are page-aligned - resource in-page offset must be in the range [0, PAGE_SIZE) For now, two additional specialized MRD validation macros were added. In the case of `UK_ASSERT_VALID_FREE_MRD`, they must meet the following criteria: - must meet the criteria of a general valid memory region descriptor - virtual/physical base addresses are equal - region is aligned end-to-end, therefore length is multiple of PAGE_SIZE times region's page count and the resource's in-page offset must be 0 In the case of `UK_ASSERT_VALID_KERNEL_MRD`, they must meet the - must meet the criteria of a general valid memory region descriptor - region is aligned end-to-end, therefore length is multiple of PAGE_SIZE times region's page count and the resource's in-page offset must be 0 That being said, with the addition of these macros, sprinkle around some mrd validations. 
After successfully fetching the `initrd` memory region, use `UK_ASSERT_VALID_MRD to validate it against a predefined set of criteria that generally apply to all memory regions, regardless of type. `ukplat_memregion_alloc` makes use of free memory regions in order to break a chunk out of them and assign it a given type depending on the requested allocation. Make sure that the free memory region we use meets the criteria that all free memory regions must have by using `UK_ASSERT_VALID_FREE_MRD` before modifying it. Validate all memory regions that we process during the execution of `ukplat_memregion_list_coalesce` against the general criteria that must be met by all memory region descriptors. When not using `HAVE_PAGING`, we go through each memory region in the list and drop those that are outside the static boot page tables since we can't extend it. Thus, make sure we also make sure that these memory region descriptors are valid w.r.t. the general criteria that must be met by all memory region descriptors. Properly validate mrds during `ukplat_paging_init` such that: - for the first loop that adds free memory region descriptors to the frame allocator, we validate with `UK_ASSERT_VALID_FREE_MRD` - for the second loop that unmaps according to `UKPLAT_MEMRF_UNMAP` memory region descriptors, we have a one use-case, very particular subset of conditions that must be met by this kind of memory regions only: - must be non-empty and aligned end-to-end, i.e. length must be !0 and the page count times PAGE_SIZE must be equal to length - virtual base address must be page-aligned and a valid value - physical base address must be 0 - page offset must be 0 - for the last loop, the one that processes `UKPLAT_MEMRF_MAP` memory region descriptors, we enforce the general rules, i.e. use `UK_ASSERT_VALID_MRD` and, furthermore, we also ensure that we not somehow map free memory regions. Lastly, when enforcing `W^X` we go through memory regions and get permissions from there. 
Ensure that these processed memory region descriptors are valid w.r.t. the predefined criteria that must be met by all memory region descriptors. NOTE: This is an initial attempt at validating as many directly referenced memory region descriptors as possible. It is not exhaustive and does not guarantee validating every single possible reference. Signed-off-by: Sergiu Moga Approved-by: Michalis Pappas Reviewed-by: Michalis Pappas Reviewed-by: Razvan Virtan GitHub-Closes: #1212 --- include/uk/plat/memory.h | 121 +++++++++++++++++++++++++++++++++++++++ lib/ukboot/boot.c | 6 +- lib/ukreloc/reloc.c | 2 + lib/vfscore/automount.c | 2 + plat/common/memory.c | 18 +++--- plat/common/paging.c | 9 ++- plat/common/w_xor_x.c | 2 + 7 files changed, 143 insertions(+), 17 deletions(-) diff --git a/include/uk/plat/memory.h b/include/uk/plat/memory.h index c3be4a8e..d43edff2 100644 --- a/include/uk/plat/memory.h +++ b/include/uk/plat/memory.h @@ -37,6 +37,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -94,6 +97,124 @@ struct ukplat_memregion_desc { #endif /* CONFIG_UKPLAT_MEMRNAME */ } __packed __align(__SIZEOF_LONG__); +/** UK_ASSERT_VALID_MRD_TYPE(mrd) macro + * + * Ensure a given memory region descriptor has one of the following defined + * types only: + * UKPLAT_MEMRT_FREE Uninitialized memory + * UKPLAT_MEMRT_RESERVED In use by platform + * UKPLAT_MEMRT_KERNEL Kernel binary segment + * UKPLAT_MEMRT_INITRD Initramdisk + * UKPLAT_MEMRT_CMDLINE Command line + * UKPLAT_MEMRT_DEVICETREE Device tree + * UKPLAT_MEMRT_STACK Thread stack + * @param mrd pointer to the memory region descriptor whose type to validate + */ +#define UK_ASSERT_VALID_MRD_TYPE(mrd) \ + do { \ + switch ((mrd)->type) { \ + case UKPLAT_MEMRT_FREE: \ + __fallthrough; \ + case UKPLAT_MEMRT_RESERVED: \ + __fallthrough; \ + case UKPLAT_MEMRT_KERNEL: \ + __fallthrough; \ + case UKPLAT_MEMRT_INITRD: \ + __fallthrough; \ + case UKPLAT_MEMRT_CMDLINE: \ + __fallthrough; \ + case 
UKPLAT_MEMRT_DEVICETREE: \ + __fallthrough; \ + case UKPLAT_MEMRT_STACK: \ + break; \ + default: \ + UK_CRASH("Invalid mrd type: %hu\n", \ + (mrd)->type); \ + } \ + } while (0) + +/** UK_ASSERT_VALID_MRD_FLAGS(mrd) macro + * + * Ensure a given memory region descriptor has one of the following defined + * flags only: + * UKPLAT_MEMRF_READ Region is readable + * UKPLAT_MEMRF_WRITE Region is writable + * UKPLAT_MEMRF_EXECUTE Region is executable + * UKPLAT_MEMRF_UNMAP Must be unmapped at boot + * UKPLAT_MEMRF_MAP Must be mapped at boot + * + * @param mrd pointer to the memory region descriptor whose type to validate + */ +#define UK_ASSERT_VALID_MRD_FLAGS(mrd) \ + do { \ + __u16 flags_all = UKPLAT_MEMRF_READ | \ + UKPLAT_MEMRF_WRITE | \ + UKPLAT_MEMRF_EXECUTE | \ + UKPLAT_MEMRF_UNMAP | \ + UKPLAT_MEMRF_MAP; \ + \ + UK_ASSERT(((mrd)->flags & flags_all) == (mrd)->flags); \ + } while (0) + +/** UK_ASSERT_VALID_MRD(mrd) macro + * + * Ensure memory region descriptor general correctness: + * - must be of only one valid type as per UK_ASSERT_VALID_MRD_TYPE + * - must only have valid flags as per UK_ASSERT_VALID_MRD_FLAGS + * - memory region is not empty or of length 0 + * - virtual/physical base addresses are page-aligned + * - resource in-page offset must be in the range [0, PAGE_SIZE) + * + * @param mrd pointer to the free memory region descriptor to validate + */ +#define UK_ASSERT_VALID_MRD(mrd) \ + do { \ + UK_ASSERT_VALID_MRD_TYPE((mrd)); \ + UK_ASSERT_VALID_MRD_FLAGS((mrd)); \ + UK_ASSERT(PAGE_ALIGNED((mrd)->vbase)); \ + UK_ASSERT(PAGE_ALIGNED((mrd)->pbase)); \ + UK_ASSERT((mrd)->pg_off >= 0 && \ + (mrd)->pg_off < (__off)PAGE_SIZE); \ + } while (0) + +/** UK_ASSERT_VALID_FREE_MRD(mrd) macro + * + * Ensure free memory region descriptor particular correctness: + * - must meet the criteria of a general valid memory region descriptor + * - virtual/physical base addresses are equal + * - region is aligned end-to-end, therefore length is multiple of + * PAGE_SIZE times 
region's page count and the resource's + * in-page offset must be 0 + * + * @param mrd pointer to the free memory region descriptor to validate + */ +#define UK_ASSERT_VALID_FREE_MRD(mrd) \ + do { \ + UK_ASSERT_VALID_MRD((mrd)); \ + UK_ASSERT((mrd)->type == UKPLAT_MEMRT_FREE); \ + UK_ASSERT((mrd)->vbase == (mrd)->pbase); \ + UK_ASSERT((mrd)->pg_count * PAGE_SIZE == (mrd)->len); \ + UK_ASSERT(!(mrd)->pg_off); \ + } while (0) + +/** UK_ASSERT_VALID_KERNEL_MRD(mrd) macro + * + * Ensure kernel memory region descriptor particular correctness: + * - must meet the criteria of a general valid memory region descriptor + * - region is aligned end-to-end, therefore length is multiple of + * PAGE_SIZE times region's page count and the resource's + * in-page offset must be 0 + * + * @param mrd pointer to the kernel memory region descriptor to validate + */ +#define UK_ASSERT_VALID_KERNEL_MRD(mrd) \ + do { \ + UK_ASSERT_VALID_MRD((mrd)); \ + UK_ASSERT((mrd)->type == UKPLAT_MEMRT_KERNEL); \ + UK_ASSERT((mrd)->pg_count * PAGE_SIZE == (mrd)->len); \ + UK_ASSERT(!(mrd)->pg_off); \ + } while (0) + /** * Check whether the memory region descriptor overlaps with [pstart, pend) in * the physical address space. diff --git a/lib/ukboot/boot.c b/lib/ukboot/boot.c index b826c242..8e0c4906 100644 --- a/lib/ukboot/boot.c +++ b/lib/ukboot/boot.c @@ -88,7 +88,6 @@ #ifdef CONFIG_LIBUKSP #include #endif -#include #include #include #if CONFIG_LIBUKBOOT_MAINTHREAD @@ -208,10 +207,7 @@ static struct uk_alloc *heap_init() * add every subsequent region to it. 
*/ ukplat_memregion_foreach(&md, UKPLAT_MEMRT_FREE, 0, 0) { - UK_ASSERT(md->vbase == md->pbase); - UK_ASSERT(!(md->pbase & ~PAGE_MASK)); - UK_ASSERT(md->len); - UK_ASSERT(!(md->len & ~PAGE_MASK)); + UK_ASSERT_VALID_FREE_MRD(md); uk_pr_debug("Trying %p-%p 0x%02x %s\n", (void *)md->vbase, (void *)(md->vbase + md->len), diff --git a/lib/ukreloc/reloc.c b/lib/ukreloc/reloc.c index 75f9e13a..5295d521 100644 --- a/lib/ukreloc/reloc.c +++ b/lib/ukreloc/reloc.c @@ -124,6 +124,8 @@ void do_uk_reloc_kmrds(__paddr_t r_paddr, __vaddr_t r_vaddr) * since they contain the link-time addresses, relative to rt_baddr. */ ukplat_memregion_foreach(&mrdp, UKPLAT_MEMRT_KERNEL, 0, 0) { + UK_ASSERT_VALID_KERNEL_MRD(mrdp); + mrdp->pbase -= (__paddr_t)lt_baddr; mrdp->pbase += r_paddr; mrdp->vbase -= (__vaddr_t)lt_baddr; diff --git a/lib/vfscore/automount.c b/lib/vfscore/automount.c index 045ca4d9..aa383c01 100644 --- a/lib/vfscore/automount.c +++ b/lib/vfscore/automount.c @@ -134,6 +134,8 @@ static int vfscore_mount_initrd_volume(struct vfscore_volume *vv) return -1; } + UK_ASSERT_VALID_MRD(initrd); + vbase = (void *)initrd->vbase + initrd->pg_off; vlen = initrd->len; } diff --git a/plat/common/memory.c b/plat/common/memory.c index 9229ad2d..f197e207 100644 --- a/plat/common/memory.c +++ b/plat/common/memory.c @@ -77,9 +77,11 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) desired_sz = size; size = ALIGN_UP(size, __PAGE_SIZE); ukplat_memregion_foreach(&mrd, UKPLAT_MEMRT_FREE, 0, 0) { + UK_ASSERT_VALID_FREE_MRD(mrd); UK_ASSERT(mrd->pbase <= __U64_MAX - size); + pstart = ALIGN_UP(mrd->pbase, __PAGE_SIZE); - pend = pstart + size; + pend = pstart + size; if (unmap_len && (!RANGE_CONTAIN(unmap_start, unmap_len, pstart, size) || @@ -91,7 +93,7 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) return NULL; ostart = mrd->pbase; - olen = mrd->len; + olen = mrd->len; /* Check whether we are allocating from an in-image memory hole * or not. 
If no, then it is not already mapped. @@ -314,7 +316,8 @@ void ukplat_memregion_list_coalesce(struct ukplat_memregion_list *list) ukplat_memregion_print_desc(ml); ukplat_memregion_print_desc(mr); - UK_ASSERT(ml->pbase <= mr->pbase); + UK_ASSERT_VALID_MRD(ml); + UK_ASSERT_VALID_MRD(mr); ml_prio = get_mrd_prio(ml); uk_pr_debug("Priority of left memory region: %d\n", ml_prio); @@ -557,10 +560,11 @@ int ukplat_mem_init(void) &unmap_end); for (i = (int)bi->mrds.count - 1; i >= 0; i--) { ukplat_memregion_get(i, &mrdp); - if (mrdp->vbase >= unmap_end || - mrdp->vbase + mrdp->len <= unmap_start) { - /* Region is outside the unmapped area */ - uk_pr_info("Memory %012lx-%012lx outside unmapped area\n", + UK_ASSERT_VALID_MRD(mrdp); + + if (mrdp->vbase >= unmap_end) { + /* Region is outside the mapped area */ + uk_pr_info("Memory %012lx-%012lx outside mapped area\n", mrdp->vbase, mrdp->vbase + mrdp->len); if (mrdp->type == UKPLAT_MEMRT_FREE) diff --git a/plat/common/paging.c b/plat/common/paging.c index 12581233..e2f6866c 100644 --- a/plat/common/paging.c +++ b/plat/common/paging.c @@ -1450,9 +1450,7 @@ int ukplat_paging_init(void) */ rc = -ENOMEM; /* In case there is no region */ ukplat_memregion_foreach(&mrd, UKPLAT_MEMRT_FREE, 0, 0) { - UK_ASSERT(mrd->vbase == mrd->pbase); - UK_ASSERT(!(mrd->pbase & ~PAGE_MASK)); - UK_ASSERT(mrd->len); + UK_ASSERT_VALID_FREE_MRD(mrd); /* Not mapped */ mrd->vbase = __U64_MAX; @@ -1508,8 +1506,9 @@ int ukplat_paging_init(void) /* Perform mappings */ ukplat_memregion_foreach(&mrd, 0, UKPLAT_MEMRF_MAP, UKPLAT_MEMRF_MAP) { - UK_ASSERT(!(mrd->vbase & ~PAGE_MASK)); - UK_ASSERT(mrd->vbase != __U64_MAX); + UK_ASSERT_VALID_MRD(mrd); + /* Do not allow mapping of free memory regions */ + UK_ASSERT(mrd->type != UKPLAT_MEMRT_FREE); #if defined(CONFIG_ARCH_ARM_64) if (!RANGE_CONTAIN(bpt_unmap_mrd.pbase, bpt_unmap_mrd.len, mrd->pbase, diff --git a/plat/common/w_xor_x.c b/plat/common/w_xor_x.c index df27a5c8..61a71554 100644 --- a/plat/common/w_xor_x.c +++ 
b/plat/common/w_xor_x.c @@ -69,6 +69,8 @@ void __weak enforce_w_xor_x(void) if (d->type == UKPLAT_MEMRT_FREE) continue; + UK_ASSERT_VALID_MRD(d); + #ifdef CONFIG_ARCH_ARM_64 /* Skip RW regions. These will be protected by WXN */ if (d->flags & UKPLAT_MEMRF_WRITE) -- Gitee From d7f657832b88545f55522d2a90cf0bc67ed747b0 Mon Sep 17 00:00:00 2001 From: Michalis Pappas Date: Sun, 28 Apr 2024 11:13:50 +0200 Subject: [PATCH 03/18] plat/common/arm64: Set DIRECTMAP_AREA_END to the end of low VA range The direct-mapped area maps the first 512GiB of the address space to an architecture-defined region. In arm64 that uses the highest 512GiB of the low VA range. Update DIRECTMAP_AREA_END to correctly specify the end of the low VA range. Signed-off-by: Michalis Pappas Reviewed-by: Sergiu Moga Reviewed-by: Serban Sorohan Approved-by: Razvan Deaconescu GitHub-Closes: #1373 --- plat/common/include/arm/arm64/paging.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plat/common/include/arm/arm64/paging.h b/plat/common/include/arm/arm64/paging.h index b615ce1a..d0b58547 100644 --- a/plat/common/include/arm/arm64/paging.h +++ b/plat/common/include/arm/arm64/paging.h @@ -65,7 +65,7 @@ * for the frame allocators through this area. */ #define DIRECTMAP_AREA_START 0x0000ff8000000000 -#define DIRECTMAP_AREA_END 0xffffffffffffffff +#define DIRECTMAP_AREA_END 0x0000ffffffffffff #define DIRECTMAP_AREA_SIZE (DIRECTMAP_AREA_END - DIRECTMAP_AREA_START + 1) static inline __vaddr_t -- Gitee From 49727471409d2b61dd50fa3f156952db47ed9c57 Mon Sep 17 00:00:00 2001 From: Michalis Pappas Date: Sun, 28 Apr 2024 10:27:59 +0200 Subject: [PATCH 04/18] arch/arm64: Add definitions for block-size mappings VMSAv8-64 does not provide a naming scheme for the block size mapped by PT block descriptors at various translation levels. Moreover, the block size varies depending on the size of the translation granule. 
To provide granularity agnostic definitions, use the x86_64 terminology of Large / Huge pages. Signed-off-by: Michalis Pappas Reviewed-by: Sergiu Moga Reviewed-by: Serban Sorohan Approved-by: Razvan Deaconescu GitHub-Closes: #1373 --- arch/arm/arm64/include/uk/asm/paging.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/arm/arm64/include/uk/asm/paging.h b/arch/arm/arm64/include/uk/asm/paging.h index fdd9ef25..53248905 100644 --- a/arch/arm/arm64/include/uk/asm/paging.h +++ b/arch/arm/arm64/include/uk/asm/paging.h @@ -69,6 +69,16 @@ struct ukarch_pagetable { #define PT_PTES_PER_LEVEL 512 #define PT_LEVEL_SHIFT 9 +#define PAGE_LARGE_LEVEL 1 +#define PAGE_LARGE_SHIFT 21 +#define PAGE_LARGE_SIZE 0x200000UL +#define PAGE_LARGE_MASK (~(PAGE_LARGE_SIZE - 1)) + +#define PAGE_HUGE_LEVEL 2 +#define PAGE_HUGE_SHIFT 30 +#define PAGE_HUGE_SIZE 0x40000000UL +#define PAGE_HUGE_MASK (~(PAGE_HUGE_SIZE - 1)) + /* We use plain values here so we do not create dependencies on external helper * macros, which would forbid us to use the macros in functions defined further * down in this header. -- Gitee From d7fe8a83293655f328c159d3b838e4a02cb3bbb4 Mon Sep 17 00:00:00 2001 From: Michalis Pappas Date: Sun, 28 Apr 2024 11:11:10 +0200 Subject: [PATCH 05/18] plat/common: Rework paged memory init Rework the initialization of paged memory to provide a more flexible implementation that is capable of handling regions beyond the limits defined in the boot pagetables. The motivation for this change is to allow mapping device regions that are unknown at compile-time, such as Unprotected IPA Alias regions of Arm CCA Realms, the address of which depends on the executing platform. Under the new scheme bootinfo is reduced to only contain mrds that correspond to valid memory regions. This deprecates the unmap_mrd region and the UKPLAT_MEMRF_MAP / UKPLAT_MEMRF_UNMAP mrd flags. 
Moreover, the boot pagetables are no longer updated during paged memory init, but instead are replaced with a new pagetable that initialized with the regions defined in bootinfo. Besides the additional flexibility, this implementation has the potential of some performance improvement as it removes expensive TLB flush operations associated with unmap. Signed-off-by: Michalis Pappas Co-authored-by: Sergiu Moga Reviewed-by: Sergiu Moga Reviewed-by: Serban Sorohan Approved-by: Razvan Deaconescu GitHub-Closes: #1373 --- plat/common/memory.c | 94 ++++---------------------------------------- plat/common/paging.c | 84 +++++++++++++++++++-------------------- 2 files changed, 50 insertions(+), 128 deletions(-) diff --git a/plat/common/memory.c b/plat/common/memory.c index f197e207..20374ea1 100644 --- a/plat/common/memory.c +++ b/plat/common/memory.c @@ -38,6 +38,10 @@ #include #include +#if CONFIG_HAVE_PAGING +#include +#endif /* CONFIG_HAVE_PAGING */ + extern struct ukplat_memregion_desc bpt_unmap_mrd; static struct uk_alloc *plat_allocator; @@ -64,15 +68,12 @@ struct uk_alloc *ukplat_memallocator_get(void) void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) { struct ukplat_memregion_desc *mrd, alloc_mrd = {0}; - __paddr_t unmap_start; - __sz unmap_len, desired_sz; struct ukplat_bootinfo *bi; __paddr_t pstart, pend; __paddr_t ostart, olen; + __sz desired_sz; int rc; - align_free_memregion(&bpt_unmap_mrd, &unmap_start, &unmap_len, NULL); - /* Preserve desired size */ desired_sz = size; size = ALIGN_UP(size, __PAGE_SIZE); @@ -83,11 +84,6 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) pstart = ALIGN_UP(mrd->pbase, __PAGE_SIZE); pend = pstart + size; - if (unmap_len && - (!RANGE_CONTAIN(unmap_start, unmap_len, pstart, size) || - pend > mrd->pbase + mrd->len)) - continue; - if ((mrd->flags & UKPLAT_MEMRF_PERMS) != (UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE)) return NULL; @@ -95,13 +91,6 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) 
ostart = mrd->pbase; olen = mrd->len; - /* Check whether we are allocating from an in-image memory hole - * or not. If no, then it is not already mapped. - */ - if (!RANGE_CONTAIN(__BASE_ADDR, __END - __BASE_ADDR, - pstart, size)) - flags |= UKPLAT_MEMRF_MAP; - /* If fragmenting this memory region leaves it with length 0, * then simply overwrite and return it instead. */ @@ -130,8 +119,8 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) alloc_mrd.pg_off = 0; alloc_mrd.len = desired_sz; alloc_mrd.pg_count = PAGE_COUNT(desired_sz); - alloc_mrd.type = type; - alloc_mrd.flags = flags | UKPLAT_MEMRF_MAP; + alloc_mrd.type = type; + alloc_mrd.flags = flags; bi = ukplat_bootinfo_get(); if (unlikely(!bi)) @@ -475,76 +464,9 @@ int ukplat_memregion_get(int i, struct ukplat_memregion_desc **mrd) } #ifdef CONFIG_HAVE_PAGING -#include - -static int ukplat_memregion_list_insert_unmaps(struct ukplat_bootinfo *bi) -{ - __paddr_t unmap_start, unmap_end, unmap_len; - int rc; - - if (!bpt_unmap_mrd.len) - return 0; - - /* Be PIE aware: split the unmap memory region so that we do no unmap - * the Kernel image. 
- */ - align_free_memregion(&bpt_unmap_mrd, &unmap_start, &unmap_len, - &unmap_end); - - /* After Kernel image */ - rc = ukplat_memregion_list_insert( - &bi->mrds, - &(struct ukplat_memregion_desc){ - .pbase = 0, - .vbase = ALIGN_UP(__END, __PAGE_SIZE), - .pg_off = 0, - .len = unmap_end - ALIGN_UP(__END, __PAGE_SIZE), - .pg_count = PAGE_COUNT(unmap_end - ALIGN_UP(__END, __PAGE_SIZE)), - .type = 0, - .flags = UKPLAT_MEMRF_UNMAP, - }); - if (unlikely(rc < 0)) - return rc; - - /* Before Kernel image */ - return ukplat_memregion_list_insert( - &bi->mrds, - &(struct ukplat_memregion_desc){ - .pbase = 0, - .vbase = unmap_start, - .pg_off = 0, - .len = ALIGN_DOWN(__BASE_ADDR, __PAGE_SIZE) - unmap_start, - .pg_count = - PAGE_COUNT(ALIGN_DOWN(__BASE_ADDR, __PAGE_SIZE) - unmap_start), - .type = 0, - .flags = UKPLAT_MEMRF_UNMAP, - }); -} - int ukplat_mem_init(void) { - struct ukplat_bootinfo *bi = ukplat_bootinfo_get(); - int rc; - - UK_ASSERT(bi); - - rc = ukplat_memregion_list_insert_unmaps(bi); - if (unlikely(rc < 0)) - return rc; - - rc = ukplat_paging_init(); - if (unlikely(rc < 0)) - return rc; - - /* Remove the two memory regions inserted by - * ukplat_memregion_list_insert_unmaps(). 
Due to their `pbase` nature - * and us never adding regions starting from zero-page, they are - * guaranteed to be the first in the list - */ - ukplat_memregion_list_delete(&bi->mrds, 0); - ukplat_memregion_list_delete(&bi->mrds, 0); - - return 0; + return ukplat_paging_init(); } #else /* CONFIG_HAVE_PAGING */ int ukplat_mem_init(void) diff --git a/plat/common/paging.c b/plat/common/paging.c index e2f6866c..ad502a64 100644 --- a/plat/common/paging.c +++ b/plat/common/paging.c @@ -69,6 +69,12 @@ static inline void pg_pt_free(struct uk_pagetable *pt, __vaddr_t pt_vaddr, static int pg_page_split(struct uk_pagetable *pt, __vaddr_t pt_vaddr, __vaddr_t vaddr, unsigned int level); +static int pg_page_mapx(struct uk_pagetable *pt, __vaddr_t pt_vaddr, + unsigned int level, __vaddr_t vaddr, __paddr_t paddr, + __sz len, unsigned long attr, unsigned long flags, + __pte_t template, unsigned int template_level, + struct ukplat_page_mapx *mapx); + static int pg_page_unmap(struct uk_pagetable *pt, __vaddr_t pt_vaddr, unsigned int level, __vaddr_t vaddr, __sz len, unsigned long flags); @@ -278,16 +284,37 @@ int ukplat_pt_init(struct uk_pagetable *pt, __paddr_t start, __sz len) if (unlikely(rc)) return rc; - /* We create the new page table from the page table hierarchy that is - * currently configured in hardware. To that end we just set the root - * address. Note that it is not a problem that the page tables have not - * been allocated by the frame allocator. Unmapping pages that do not - * stem from the memory allocator silently fails and is ignored. + /* Allocate a new top-level page table */ + rc = pg_pt_alloc(pt, &pt->pt_vbase, &pt->pt_pbase, PT_LEVELS - 1); + if (unlikely(rc)) + return rc; + + /* FIXME: The direct-mapped page table is an architecture-specific + * abstraction, so it should be mapped by the pgarch_ API. What + * currently prevents us from doing so is that upon pgarch_pt_init() + * we haven't yet allocated a top-level pagetable. + * + * Options: + * 1. 
Move both the allocation of the top-level page table to + * pgarch_pt_init() and make it a requriement that it is + * initialized by that function. Move mapping of the direct + * mapped region into pgarch_pt_init(). Requires that the + * pgarch layer has access to pg_pt_alloc() and pg_pt_map(). + * + * 2. Create a pgarch_pt_post_init() and move mapping of the + * direct-mapped region there. It also requires mapping + * capabilities from pgarch. */ - pt->pt_pbase = ukarch_pt_read_base(); - pt->pt_vbase = pgarch_pt_map(pt, pt->pt_pbase, PT_LEVELS - 1); - if (unlikely(pt->pt_vbase == __VADDR_INV)) - return -ENOMEM; + rc = pg_page_mapx(pt, pt->pt_vbase, PT_LEVELS - 1, + DIRECTMAP_AREA_START, /* vaddr */ + 0x00000000, /* paddr */ + DIRECTMAP_AREA_SIZE, /* len */ + PAGE_ATTR_PROT_READ | PAGE_ATTR_PROT_WRITE, /* attr */ + 0, /* flags */ + PT_Lx_PTE_INVALID(PAGE_LEVEL), + PAGE_LEVEL, NULL); + if (unlikely(rc)) + return rc; #ifdef CONFIG_PAGING_STATS /* If we have stats active, we need to discover all mappings etc. We @@ -556,6 +583,7 @@ static int pg_page_mapx(struct uk_pagetable *pt, __vaddr_t pt_vaddr, pte_idx = PT_Lx_IDX(vaddr, lvl); page_size = PAGE_Lx_SIZE(lvl); + do { /* This loop is responsible for walking the page table down * until we reach the desired level. 
If there is a page table @@ -566,7 +594,6 @@ static int pg_page_mapx(struct uk_pagetable *pt, __vaddr_t pt_vaddr, rc = ukarch_pte_read(pt_vaddr, lvl, pte_idx, &pte); if (unlikely(rc)) return rc; - if (PT_Lx_PTE_PRESENT(pte, lvl)) { /* If there is already a larger page mapped * at this address and we have a mapx, we @@ -1436,8 +1463,6 @@ static inline unsigned long bootinfo_to_page_attr(__u16 flags) return prot; } -extern struct ukplat_memregion_desc bpt_unmap_mrd; - int ukplat_paging_init(void) { struct ukplat_memregion_desc *mrd; @@ -1483,38 +1508,13 @@ int ukplat_paging_init(void) if (unlikely(!kernel_pt.fa)) return rc; - /* Perform unmappings */ - ukplat_memregion_foreach(&mrd, 0, UKPLAT_MEMRF_UNMAP, - UKPLAT_MEMRF_UNMAP) { - /* Ensure unmap memory region descriptors' correctness */ - /* Must be non-empty and aligned end-to-end */ - UK_ASSERT(mrd->len); - UK_ASSERT(mrd->pg_count * PAGE_SIZE == mrd->len); - UK_ASSERT(PAGE_ALIGNED(mrd->vbase)); - UK_ASSERT(!mrd->pg_off); - /* Physical base address must be 0 */ - UK_ASSERT(!mrd->pbase); - /* Virtual base address must be a valid value */ - UK_ASSERT(mrd->vbase != __U64_MAX); - - rc = ukplat_page_unmap(&kernel_pt, mrd->vbase, mrd->pg_count, - PAGE_FLAG_KEEP_FRAMES); - if (unlikely(rc)) - return rc; - } - /* Perform mappings */ - ukplat_memregion_foreach(&mrd, 0, UKPLAT_MEMRF_MAP, - UKPLAT_MEMRF_MAP) { - UK_ASSERT_VALID_MRD(mrd); - /* Do not allow mapping of free memory regions */ - UK_ASSERT(mrd->type != UKPLAT_MEMRT_FREE); - -#if defined(CONFIG_ARCH_ARM_64) - if (!RANGE_CONTAIN(bpt_unmap_mrd.pbase, bpt_unmap_mrd.len, mrd->pbase, - mrd->pg_count * PAGE_SIZE)) + ukplat_memregion_foreach(&mrd, 0, 0, 0) { + /* Free mem is managed by falloc */ + if (mrd->type == UKPLAT_MEMRT_FREE) continue; -#endif + + UK_ASSERT_VALID_MRD(mrd); prot = bootinfo_to_page_attr(mrd->flags); -- Gitee From 94ffe7902475ac9c9c53a94c151e38595214b436 Mon Sep 17 00:00:00 2001 From: Michalis Pappas Date: Sun, 28 Apr 2024 15:45:35 +0200 Subject: 
[PATCH 06/18] plat/common: Move vaddr check to callers of pgarch_page_mapx() Move vaddr check from pgarch_page_mapx() to its callers, as that function is also used to map the direct-mapped region, the vaddr of which is past (__VADDR_MAX - len). Signed-off-by: Michalis Pappas Reviewed-by: Sergiu Moga Reviewed-by: Serban Sorohan Approved-by: Razvan Deaconescu GitHub-Closes: #1373 --- plat/common/paging.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plat/common/paging.c b/plat/common/paging.c index ad502a64..57b47754 100644 --- a/plat/common/paging.c +++ b/plat/common/paging.c @@ -559,7 +559,6 @@ static int pg_page_mapx(struct uk_pagetable *pt, __vaddr_t pt_vaddr, UK_ASSERT(len > 0); UK_ASSERT(PAGE_Lx_ALIGNED(len, to_lvl)); UK_ASSERT(PAGE_Lx_ALIGNED(vaddr, to_lvl)); - UK_ASSERT(vaddr <= __VADDR_MAX - len); UK_ASSERT(ukarch_vaddr_range_isvalid(vaddr, len)); if (paddr != __PADDR_ANY) { @@ -872,6 +871,8 @@ int ukplat_page_mapx(struct uk_pagetable *pt, __vaddr_t vaddr, UK_ASSERT(pt->pt_vbase != __VADDR_INV); UK_ASSERT(pt->pt_pbase != __PADDR_INV); + UK_ASSERT(vaddr <= __VADDR_MAX - len); + return pg_page_mapx(pt, pt->pt_vbase, PT_LEVELS - 1, vaddr, paddr, len, attr, flags, PT_Lx_PTE_INVALID(PAGE_LEVEL), PAGE_LEVEL, mapx); @@ -921,6 +922,9 @@ static int pg_page_split(struct uk_pagetable *pt, __vaddr_t pt_vaddr, * contiguous range of physical memory than the input page */ paddr = PT_Lx_PTE_PADDR(pte, level); + + UK_ASSERT(vaddr <= __VADDR_MAX - PAGE_Lx_SIZE(level)); + rc = pg_page_mapx(pt, new_pt_vaddr, level - 1, vaddr, paddr, PAGE_Lx_SIZE(level), attr, flags, pte, level, NULL); if (unlikely(rc)) -- Gitee From 07776ea074de225efa088701ed89816c554dac4a Mon Sep 17 00:00:00 2001 From: Michalis Pappas Date: Sun, 22 Oct 2023 18:10:26 +0200 Subject: [PATCH 07/18] plat/common: Do not pass MEMRF_MAP when allocating memregs UKPLAT_MEMRF_MAP / UKPLAT_MEMRF_UNMAP mrd types have been obsoleted by the reworked implementation of paged memory init. 
Signed-off-by: Michalis Pappas Reviewed-by: Sergiu Moga Reviewed-by: Serban Sorohan Approved-by: Razvan Deaconescu GitHub-Closes: #1373 --- plat/common/bootinfo_fdt.c | 9 ++--- plat/common/include/uk/plat/common/memory.h | 45 ++++++++++----------- 2 files changed, 26 insertions(+), 28 deletions(-) diff --git a/plat/common/bootinfo_fdt.c b/plat/common/bootinfo_fdt.c index 7a8efcf0..17974391 100644 --- a/plat/common/bootinfo_fdt.c +++ b/plat/common/bootinfo_fdt.c @@ -127,8 +127,7 @@ static void fdt_bootinfo_cmdl_mrd(struct ukplat_bootinfo *bi, void *fdtp) cmdl = ukplat_memregion_alloc(fdt_cmdl_len + sizeof(CONFIG_UK_NAME) + 1, UKPLAT_MEMRT_CMDLINE, - UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_MAP); + UKPLAT_MEMRF_READ); if (unlikely(!cmdl)) ukplat_bootinfo_crash("Command-line alloc failed\n"); @@ -183,7 +182,7 @@ static void fdt_bootinfo_initrd_mrd(struct ukplat_bootinfo *bi, void *fdtp) mrd.len = initrd_addr(fdt_initrd_end[0], end_len) - initrd_base; mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); mrd.type = UKPLAT_MEMRT_INITRD; - mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; + mrd.flags = UKPLAT_MEMRF_READ; rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); if (unlikely(rc < 0)) @@ -200,8 +199,8 @@ static void fdt_bootinfo_fdt_mrd(struct ukplat_bootinfo *bi, void *fdtp) mrd.pg_off = (__u64)fdtp - mrd.pbase; mrd.len = fdt_totalsize(fdtp); mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); - mrd.type = UKPLAT_MEMRT_DEVICETREE; - mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; + mrd.type = UKPLAT_MEMRT_DEVICETREE; + mrd.flags = UKPLAT_MEMRF_READ; rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); if (unlikely(rc < 0)) diff --git a/plat/common/include/uk/plat/common/memory.h b/plat/common/include/uk/plat/common/memory.h index bca77ee3..9e7f4cf3 100644 --- a/plat/common/include/uk/plat/common/memory.h +++ b/plat/common/include/uk/plat/common/memory.h @@ -187,33 +187,33 @@ ukplat_memregion_list_insert_legacy_hi_mem(struct ukplat_memregion_list *list) /* Note that we are 
mapping it as writable as well to cope with the * potential existence of the VGA framebuffer/SMM shadow memory. */ - rc = ukplat_memregion_list_insert( - list, - &(struct ukplat_memregion_desc){ - .pbase = X86_HI_MEM_START, - .vbase = X86_HI_MEM_START, - .pg_off = 0, - .len = X86_HI_MEM_LEN, - .pg_count = PAGE_COUNT(X86_HI_MEM_LEN), - .type = UKPLAT_MEMRT_RESERVED, - .flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE | UKPLAT_MEMRF_MAP, - }); + rc = ukplat_memregion_list_insert(list, + &(struct ukplat_memregion_desc){ + .pbase = X86_HI_MEM_START, + .vbase = X86_HI_MEM_START, + .pg_off = 0, + .len = X86_HI_MEM_LEN, + .pg_count = PAGE_COUNT(X86_HI_MEM_LEN), + .type = UKPLAT_MEMRT_RESERVED, + .flags = UKPLAT_MEMRF_READ | + UKPLAT_MEMRF_WRITE, + }); if (unlikely(rc < 0)) return rc; /* Keep compatibility with other possible reports of reserved memory * regions of this area and mark the BIOS System Memory as read-only. */ - rc = ukplat_memregion_list_insert( - list, &(struct ukplat_memregion_desc){ - .pbase = X86_BIOS_ROM_START, - .vbase = X86_BIOS_ROM_START, - .pg_off = 0, - .len = X86_BIOS_ROM_LEN, - .pg_count = PAGE_COUNT(X86_BIOS_ROM_LEN), - .type = UKPLAT_MEMRT_RESERVED, - .flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP, - }); + rc = ukplat_memregion_list_insert(list, + &(struct ukplat_memregion_desc){ + .pbase = X86_BIOS_ROM_START, + .vbase = X86_BIOS_ROM_START, + .pg_off = 0, + .len = X86_BIOS_ROM_LEN, + .pg_count = PAGE_COUNT(X86_BIOS_ROM_LEN), + .type = UKPLAT_MEMRT_RESERVED, + .flags = UKPLAT_MEMRF_READ, + }); if (unlikely(rc < 0)) return rc; @@ -235,8 +235,7 @@ ukplat_memregion_alloc_sipi_vect(void) x86_start16_addr = (__uptr)ukplat_memregion_alloc(len, UKPLAT_MEMRT_RESERVED, UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE | - UKPLAT_MEMRF_MAP); + UKPLAT_MEMRF_WRITE); if (unlikely(!x86_start16_addr || x86_start16_addr >= X86_HI_MEM_START)) return -ENOMEM; -- Gitee From e2eb41e284dc60a25f536cc530dd9bbfd852989d Mon Sep 17 00:00:00 2001 From: Michalis Pappas Date: Sun, 22 
Oct 2023 18:11:10 +0200 Subject: [PATCH 08/18] plat/kvm: Do not pass MEMRF_MAP when allocating memregs UKPLAT_MEMRF_MAP / UKPLAT_MEMRF_UNMAP mrd types have been obsoleted by the reworked implementation of paged memory init. Signed-off-by: Michalis Pappas Reviewed-by: Sergiu Moga Reviewed-by: Serban Sorohan Approved-by: Razvan Deaconescu GitHub-Closes: #1373 --- plat/kvm/arm/setup.c | 6 ++---- plat/kvm/efi.c | 17 +++++++---------- plat/kvm/x86/lxboot.c | 8 ++++---- plat/kvm/x86/multiboot.c | 10 +++++----- plat/kvm/x86/setup.c | 6 ++---- 5 files changed, 20 insertions(+), 27 deletions(-) diff --git a/plat/kvm/arm/setup.c b/plat/kvm/arm/setup.c index 28f215c4..cb5697d0 100644 --- a/plat/kvm/arm/setup.c +++ b/plat/kvm/arm/setup.c @@ -140,8 +140,7 @@ static inline int cmdline_init(struct ukplat_bootinfo *bi) */ cmdline = ukplat_memregion_alloc(cmdline_len + 1, UKPLAT_MEMRT_KERNEL, UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE | - UKPLAT_MEMRF_MAP); + UKPLAT_MEMRF_WRITE); if (unlikely(!cmdline)) return -ENOMEM; @@ -175,8 +174,7 @@ void __no_pauth _ukplat_entry(struct ukplat_bootinfo *bi) /* Allocate boot stack */ bstack = ukplat_memregion_alloc(__STACK_SIZE, UKPLAT_MEMRT_STACK, UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE | - UKPLAT_MEMRF_MAP); + UKPLAT_MEMRF_WRITE); if (unlikely(!bstack)) UK_CRASH("Boot stack alloc failed\n"); bstack = (void *)((__uptr)bstack + __STACK_SIZE); diff --git a/plat/kvm/efi.c b/plat/kvm/efi.c index 29cb1df2..98c2965f 100644 --- a/plat/kvm/efi.c +++ b/plat/kvm/efi.c @@ -152,14 +152,12 @@ static int uk_efi_md_to_bi_mrd(struct uk_efi_mem_desc *const md, case UK_EFI_PAL_CODE: case UK_EFI_PERSISTENT_MEMORY: mrd->type = UKPLAT_MEMRT_RESERVED; - mrd->flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; - + mrd->flags = UKPLAT_MEMRF_READ; break; case UK_EFI_MEMORY_MAPPED_IO: case UK_EFI_MEMORY_MAPPED_IO_PORT_SPACE: mrd->type = UKPLAT_MEMRT_RESERVED; - mrd->flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE | - UKPLAT_MEMRF_MAP; + mrd->flags = UKPLAT_MEMRF_READ | 
UKPLAT_MEMRF_WRITE; break; case UK_EFI_RUNTIME_SERVICES_CODE: @@ -179,8 +177,7 @@ static int uk_efi_md_to_bi_mrd(struct uk_efi_mem_desc *const md, * permissions to avoid crashes generated by explicit firmware * calls. */ - mrd->flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE | - UKPLAT_MEMRF_MAP; + mrd->flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE; break; case UK_EFI_LOADER_CODE: @@ -346,7 +343,7 @@ static void uk_efi_rt_md_to_bi_mrds(struct ukplat_memregion_desc **rt_mrds, rt_mrd->pg_count = mat_md->number_of_pages; rt_mrd->vbase = rt_mrd->pbase; rt_mrd->type = UKPLAT_MEMRT_RESERVED; - rt_mrd->flags = UKPLAT_MEMRF_MAP; + rt_mrd->flags = UKPLAT_MEMRF_READ; if (mat_md->attribute & UK_EFI_MEMORY_XP) if (mat_md->attribute & UK_EFI_MEMORY_RO) rt_mrd->flags |= UKPLAT_MEMRF_READ; @@ -557,7 +554,7 @@ static void uk_efi_setup_bootinfo_cmdl(struct ukplat_bootinfo *bi) mrd.len = len; mrd.pg_count = PAGE_COUNT(len); mrd.type = UKPLAT_MEMRT_CMDLINE; - mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; + mrd.flags = UKPLAT_MEMRF_READ; rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); if (unlikely(rc < 0)) UK_EFI_CRASH("Failed to insert cmdl mrd\n"); @@ -589,7 +586,7 @@ static void uk_efi_setup_bootinfo_initrd(struct ukplat_bootinfo *bi) mrd.len = len; mrd.pg_count = PAGE_COUNT(len); mrd.type = UKPLAT_MEMRT_INITRD; - mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; + mrd.flags = UKPLAT_MEMRF_READ; rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); if (unlikely(rc < 0)) UK_EFI_CRASH("Failed to insert initrd mrd\n"); @@ -618,7 +615,7 @@ static void uk_efi_setup_bootinfo_dtb(struct ukplat_bootinfo *bi) mrd.len = len; mrd.pg_count = PAGE_COUNT(len); mrd.type = UKPLAT_MEMRT_DEVICETREE; - mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; + mrd.flags = UKPLAT_MEMRF_READ; rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); if (unlikely(rc < 0)) UK_EFI_CRASH("Failed to insert dtb mrd\n"); diff --git a/plat/kvm/x86/lxboot.c b/plat/kvm/x86/lxboot.c index 9d6a70dd..e7c96e9f 100644 --- 
a/plat/kvm/x86/lxboot.c +++ b/plat/kvm/x86/lxboot.c @@ -44,8 +44,8 @@ lxboot_init_cmdline(struct ukplat_bootinfo *bi, struct lxboot_params *bp) mrd.pg_off = cmdline_addr - mrd.pbase; mrd.len = cmdline_size; mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); - mrd.type = UKPLAT_MEMRT_CMDLINE; - mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; + mrd.type = UKPLAT_MEMRT_CMDLINE; + mrd.flags = UKPLAT_MEMRF_READ; #ifdef CONFIG_UKPLAT_MEMRNAME memcpy(mrd.name, "cmdline", sizeof("cmdline")); #endif /* CONFIG_UKPLAT_MEMRNAME */ @@ -83,7 +83,7 @@ lxboot_init_initrd(struct ukplat_bootinfo *bi, struct lxboot_params *bp) mrd.len = initrd_size; mrd.type = UKPLAT_MEMRT_INITRD; mrd.pg_count = PAGE_COUNT(mrd.pg_off + initrd_size); - mrd.flags = UKPLAT_MEMRF_MAP | UKPLAT_MEMRF_READ; + mrd.flags = UKPLAT_MEMRF_READ; #ifdef CONFIG_UKPLAT_MEMRNAME memcpy(mrd.name, "initrd", sizeof("initrd")); #endif /* CONFIG_UKPLAT_MEMRNAME */ @@ -130,7 +130,7 @@ lxboot_init_mem(struct ukplat_bootinfo *bi, struct lxboot_params *bp) mrd.len = PAGE_ALIGN_UP(mrd.len); } else { mrd.type = UKPLAT_MEMRT_RESERVED; - mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; + mrd.flags = UKPLAT_MEMRF_READ; /* We assume that reserved regions cannot * overlap with loaded modules. 
diff --git a/plat/kvm/x86/multiboot.c b/plat/kvm/x86/multiboot.c index 1e164ff6..352e6353 100644 --- a/plat/kvm/x86/multiboot.c +++ b/plat/kvm/x86/multiboot.c @@ -71,14 +71,15 @@ void multiboot_entry(struct lcpu *lcpu, struct multiboot_info *mi) if (mi->flags & MULTIBOOT_INFO_CMDLINE) { if (mi->cmdline) { cmdline_len = strlen((const char *)(__uptr)mi->cmdline); + /* 1:1 mapping */ mrd.pbase = PAGE_ALIGN_DOWN(mi->cmdline); mrd.vbase = mrd.pbase; mrd.pg_off = mi->cmdline - mrd.pbase; mrd.len = cmdline_len; mrd.pg_count = PAGE_COUNT(mrd.pg_off + cmdline_len); - mrd.type = UKPLAT_MEMRT_CMDLINE; - mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; + mrd.type = UKPLAT_MEMRT_CMDLINE; + mrd.flags = UKPLAT_MEMRF_READ; mrd_insert(bi, &mrd); @@ -108,7 +109,7 @@ void multiboot_entry(struct lcpu *lcpu, struct multiboot_info *mi) mrd.len = mods[i].mod_end - mods[i].mod_start; mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); mrd.type = UKPLAT_MEMRT_INITRD; - mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_MAP; + mrd.flags = UKPLAT_MEMRF_READ; #ifdef CONFIG_UKPLAT_MEMRNAME strncpy(mrd.name, (char *)(__uptr)mods[i].cmdline, @@ -156,8 +157,7 @@ void multiboot_entry(struct lcpu *lcpu, struct multiboot_info *mi) mrd.len = PAGE_ALIGN_UP(mrd.len + mrd.pg_off); } else { mrd.type = UKPLAT_MEMRT_RESERVED; - mrd.flags = UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_MAP; + mrd.flags = UKPLAT_MEMRF_READ; /* We assume that reserved regions cannot * overlap with loaded modules. 
diff --git a/plat/kvm/x86/setup.c b/plat/kvm/x86/setup.c index 99953e9e..ade0d1d2 100644 --- a/plat/kvm/x86/setup.c +++ b/plat/kvm/x86/setup.c @@ -45,8 +45,7 @@ static inline int cmdline_init(struct ukplat_bootinfo *bi) */ cmdline = ukplat_memregion_alloc(cmdline_len + 1, UKPLAT_MEMRT_KERNEL, UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE | - UKPLAT_MEMRF_MAP); + UKPLAT_MEMRF_WRITE); if (unlikely(!cmdline)) return -ENOMEM; @@ -98,8 +97,7 @@ void _ukplat_entry(struct lcpu *lcpu, struct ukplat_bootinfo *bi) /* Allocate boot stack */ bstack = ukplat_memregion_alloc(__STACK_SIZE, UKPLAT_MEMRT_STACK, UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE | - UKPLAT_MEMRF_MAP); + UKPLAT_MEMRF_WRITE); if (unlikely(!bstack)) UK_CRASH("Boot stack alloc failed\n"); -- Gitee From bb11f6f85969c0b33292e4e41f22319ad519d6dc Mon Sep 17 00:00:00 2001 From: Michalis Pappas Date: Tue, 26 Dec 2023 13:25:53 +0100 Subject: [PATCH 09/18] include/memory: Retire UKPLAT_MEMRF_MAP / UKPLAT_MEMRF_UNMAP UKPLAT_MEMRF_MAP / UKPLAT_MEMRF_UNMAP mrd types have been obsoleted by the reworked implementation of paged memory init. 
Signed-off-by: Michalis Pappas Reviewed-by: Sergiu Moga Reviewed-by: Serban Sorohan Approved-by: Razvan Deaconescu GitHub-Closes: #1373 --- include/uk/plat/memory.h | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/include/uk/plat/memory.h b/include/uk/plat/memory.h index d43edff2..742f2167 100644 --- a/include/uk/plat/memory.h +++ b/include/uk/plat/memory.h @@ -70,9 +70,6 @@ extern "C" { #define UKPLAT_MEMRF_WRITE 0x0002 /* Region is writable */ #define UKPLAT_MEMRF_EXECUTE 0x0004 /* Region is executable */ -#define UKPLAT_MEMRF_UNMAP 0x0010 /* Must be unmapped at boot */ -#define UKPLAT_MEMRF_MAP 0x0020 /* Must be mapped at boot */ - /** * Descriptor of a memory region */ @@ -147,11 +144,9 @@ struct ukplat_memregion_desc { */ #define UK_ASSERT_VALID_MRD_FLAGS(mrd) \ do { \ - __u16 flags_all = UKPLAT_MEMRF_READ | \ - UKPLAT_MEMRF_WRITE | \ - UKPLAT_MEMRF_EXECUTE | \ - UKPLAT_MEMRF_UNMAP | \ - UKPLAT_MEMRF_MAP; \ + __u16 flags_all __maybe_unused = UKPLAT_MEMRF_READ | \ + UKPLAT_MEMRF_WRITE | \ + UKPLAT_MEMRF_EXECUTE; \ \ UK_ASSERT(((mrd)->flags & flags_all) == (mrd)->flags); \ } while (0) -- Gitee From 022d0af7f8242be92464d2397999a7769cd63fc7 Mon Sep 17 00:00:00 2001 From: Michalis Pappas Date: Mon, 25 Dec 2023 13:06:41 +0100 Subject: [PATCH 10/18] plat/common: Add UKPLAT_MEMRT_DEVICE type Regions of this type are added by device drivers that implement an early init. Specifically, upon completion of the earlyinit boot stage, device regions are expected to be mapped with appropriate protections, and additionally be added to bootinfo using the UKPLAT_MEMRT_DEVICE type. 
Signed-off-by: Michalis Pappas Reviewed-by: Sergiu Moga Reviewed-by: Serban Sorohan Approved-by: Razvan Deaconescu GitHub-Closes: #1373 --- include/uk/plat/memory.h | 4 ++++ plat/common/bootinfo.c | 3 +++ plat/common/include/uk/plat/common/memory.h | 3 +++ plat/common/memory.c | 1 + 4 files changed, 11 insertions(+) diff --git a/include/uk/plat/memory.h b/include/uk/plat/memory.h index 742f2167..2cf7f851 100644 --- a/include/uk/plat/memory.h +++ b/include/uk/plat/memory.h @@ -61,6 +61,7 @@ extern "C" { #define UKPLAT_MEMRT_CMDLINE 0x0010 /* Command line */ #define UKPLAT_MEMRT_DEVICETREE 0x0020 /* Device tree */ #define UKPLAT_MEMRT_STACK 0x0040 /* Thread stack */ +#define UKPLAT_MEMRT_DEVICE 0x0080 /* Device region */ /* Memory region flags */ #define UKPLAT_MEMRF_ALL 0xffff @@ -105,6 +106,7 @@ struct ukplat_memregion_desc { * UKPLAT_MEMRT_CMDLINE Command line * UKPLAT_MEMRT_DEVICETREE Device tree * UKPLAT_MEMRT_STACK Thread stack + * UKPLAT_MEMRT_DEVICE Device * @param mrd pointer to the memory region descriptor whose type to validate */ #define UK_ASSERT_VALID_MRD_TYPE(mrd) \ @@ -123,6 +125,8 @@ struct ukplat_memregion_desc { case UKPLAT_MEMRT_DEVICETREE: \ __fallthrough; \ case UKPLAT_MEMRT_STACK: \ + __fallthrough; \ + case UKPLAT_MEMRT_DEVICE: \ break; \ default: \ UK_CRASH("Invalid mrd type: %hu\n", \ diff --git a/plat/common/bootinfo.c b/plat/common/bootinfo.c index a12a6839..82148ffb 100644 --- a/plat/common/bootinfo.c +++ b/plat/common/bootinfo.c @@ -93,6 +93,9 @@ void ukplat_bootinfo_print(void) case UKPLAT_MEMRT_STACK: type = "stck"; break; + case UKPLAT_MEMRT_DEVICE: + type = "device "; + break; default: type = ""; break; diff --git a/plat/common/include/uk/plat/common/memory.h b/plat/common/include/uk/plat/common/memory.h index 9e7f4cf3..c023c9a8 100644 --- a/plat/common/include/uk/plat/common/memory.h +++ b/plat/common/include/uk/plat/common/memory.h @@ -410,6 +410,9 @@ ukplat_memregion_print_desc(struct ukplat_memregion_desc *mrd) case 
UKPLAT_MEMRT_STACK: type = "stck"; break; + case UKPLAT_MEMRT_DEVICE: + type = "device"; + break; default: type = ""; break; diff --git a/plat/common/memory.c b/plat/common/memory.c index 20374ea1..65a984a4 100644 --- a/plat/common/memory.c +++ b/plat/common/memory.c @@ -160,6 +160,7 @@ static inline int get_mrd_prio(struct ukplat_memregion_desc *const m) case UKPLAT_MEMRT_CMDLINE: case UKPLAT_MEMRT_STACK: case UKPLAT_MEMRT_DEVICETREE: + case UKPLAT_MEMRT_DEVICE: case UKPLAT_MEMRT_KERNEL: return MRD_PRIO_KRNL_RSRC; case UKPLAT_MEMRT_RESERVED: -- Gitee From 0e45fc33a70e74bd0c8dc46a932767eff1756d82 Mon Sep 17 00:00:00 2001 From: wangguokun Date: Tue, 19 Nov 2024 15:55:31 +0800 Subject: [PATCH 11/18] lib/uktty: Map pl011 region on runtime Insert the region of pl011 when CONFIG_PAGING is enabled. This is now required as paged memory init unmaps any memory not registered by early devices. Signed-off-by: wangguokun --- drivers/uktty/pl011/Makefile.uk | 2 ++ drivers/uktty/pl011/pl011.c | 37 +++++++++++++++++++++++++++++++++ plat/kvm/arm/setup.c | 1 - 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/drivers/uktty/pl011/Makefile.uk b/drivers/uktty/pl011/Makefile.uk index 374c5b24..12c9320b 100644 --- a/drivers/uktty/pl011/Makefile.uk +++ b/drivers/uktty/pl011/Makefile.uk @@ -3,4 +3,6 @@ $(eval $(call addlib_s,libuktty_pl011,$(CONFIG_LIBUKTTY_PL011))) CINCLUDES-$(CONFIG_LIBUKTTY_PL011) += -I$(LIBUKTTY_PL011_BASE)/include CXXINCLUDES-$(CONFIG_LIBUKTTY_PL011) += -I$(LIBUKTTY_PL011_BASE)/include +LIBUKTTY_PL011_CINCLUDES-y += -I$(UK_PLAT_COMMON_BASE)/include + LIBUKTTY_PL011_SRCS-y += $(LIBUKTTY_PL011_BASE)/pl011.c diff --git a/drivers/uktty/pl011/pl011.c b/drivers/uktty/pl011/pl011.c index 4ed34673..31004474 100644 --- a/drivers/uktty/pl011/pl011.c +++ b/drivers/uktty/pl011/pl011.c @@ -25,6 +25,10 @@ #include #include +#if CONFIG_PAGING +#include +#endif /* CONFIG_PAGING */ + /* * PL011 UART base address * As we are using the PA = VA mapping, some SoC would set PA 0 
@@ -41,6 +45,34 @@ uint8_t pl011_uart_initialized; uint64_t pl011_uart_bas; #endif /* !CONFIG_LIBUKTTY_PL011_EARLY_CONSOLE_BASE */ +#if CONFIG_PAGING +static void pl011_region_devmap(__paddr_t base, __sz size) +{ + struct ukplat_memregion_desc mrd = { 0 }; + struct ukplat_bootinfo *bi; + int rc; + + UK_ASSERT(size > 0); + UK_ASSERT(base <= __U64_MAX - size); + + bi = ukplat_bootinfo_get(); + if (unlikely(!bi)) + UK_CRASH("Invalid bootinfo"); + + mrd.pbase = PAGE_ALIGN_DOWN(base); + mrd.vbase = PAGE_ALIGN_DOWN(base); + mrd.pg_off = base - mrd.pbase; + mrd.len = size; + mrd.pg_count = PAGE_COUNT(mrd.pg_off + mrd.len); + mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE; + mrd.type = UKPLAT_MEMRT_DEVICE; + + rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); + if (unlikely(rc < 0)) + UK_CRASH("Could not insert mrd\n"); +} +#endif /* CONFIG_PAGING */ + static void init_pl011(uint64_t bas) { pl011_uart_bas = bas; @@ -80,6 +112,11 @@ void pl011_console_init(const void *dtb) val = fdt_get_address(dtb, offset, 0, &naddr, &nsize); if (val < 0) UK_CRASH("Could not find proper address!\n"); + +#if defined(CONFIG_PAGING) + pl011_region_devmap(naddr, nsize); +#endif + init_pl011((uint64_t)naddr); uk_pr_info("PL011 UART initialized\n"); pl011_uart_initialized = 1; diff --git a/plat/kvm/arm/setup.c b/plat/kvm/arm/setup.c index cb5697d0..76ec95f4 100644 --- a/plat/kvm/arm/setup.c +++ b/plat/kvm/arm/setup.c @@ -164,7 +164,6 @@ void __no_pauth _ukplat_entry(struct ukplat_bootinfo *bi) int rc; fdt = (void *)bi->dtb; - kvm_console_init(fdt); rc = cmdline_init(bi); -- Gitee From 7e5e40ad9c71a78f101b9ce70d6657b1594546ef Mon Sep 17 00:00:00 2001 From: Michalis Pappas Date: Wed, 15 May 2024 11:48:35 +0200 Subject: [PATCH 12/18] drivers/ukintctlr/gic: Map GIC regions at runtime Map GIC regions dynamically if paging is enabled. This is now required as paged memory init unmaps any memory not backed by an mrd. 
Signed-off-by: Michalis Pappas Reviewed-by: Sergiu Moga Reviewed-by: Serban Sorohan Approved-by: Razvan Deaconescu GitHub-Closes: #1373 --- drivers/ukintctlr/gic/Config.uk | 1 + drivers/ukintctlr/gic/gic-v2.c | 40 +++++++++++++++++++++++++++++++++ drivers/ukintctlr/gic/gic-v3.c | 40 +++++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+) diff --git a/drivers/ukintctlr/gic/Config.uk b/drivers/ukintctlr/gic/Config.uk index 331fc6d1..e15d2264 100644 --- a/drivers/ukintctlr/gic/Config.uk +++ b/drivers/ukintctlr/gic/Config.uk @@ -1,5 +1,6 @@ config LIBUKINTCTLR_GIC select LIBUKINTCTLR + select LIBUKBUS_PLATFORM if CONFIG_PAGING bool config LIBUKINTCTLR_GICV2 diff --git a/drivers/ukintctlr/gic/gic-v2.c b/drivers/ukintctlr/gic/gic-v2.c index 3ed84476..b912b602 100644 --- a/drivers/ukintctlr/gic/gic-v2.c +++ b/drivers/ukintctlr/gic/gic-v2.c @@ -50,6 +50,11 @@ #include #include +#if CONFIG_PAGING +#include +#include +#endif /* CONFIG_PAGING */ + /* Max CPU interface for GICv2 */ #define GIC_MAX_CPUIF 8 @@ -614,6 +619,33 @@ static int gicv2_do_probe(void) } #endif /* !CONFIG_UKPLAT_ACPI */ +#if CONFIG_PAGING +static int gicv2_map(void) +{ + __vaddr_t vbase; + + vbase = uk_bus_pf_devmap(gicv2_drv.dist_mem_addr, + gicv2_drv.dist_mem_size); + if (unlikely(PTRISERR(vbase))) { + uk_pr_err("Could not map GIC dist (%d)\n", PTR2ERR(vbase)); + return PTR2ERR(vbase); + } + + gicv2_drv.dist_mem_addr = vbase; + + vbase = uk_bus_pf_devmap(gicv2_drv.cpuif_mem_addr, + gicv2_drv.cpuif_mem_size); + if (unlikely(PTRISERR(vbase))) { + uk_pr_err("Could not map GIC cpuif (%d)\n", PTR2ERR(vbase)); + return PTR2ERR(vbase); + } + + gicv2_drv.cpuif_mem_addr = vbase; + + return 0; +} +#endif /* CONFIG_PAGING */ + /** * Probe device tree or ACPI for GICv2 * NOTE: First time must not be called from multiple CPUs in parallel @@ -647,6 +679,14 @@ int gicv2_probe(struct _gic_dev **dev) return rc; } +#if CONFIG_PAGING + rc = gicv2_map(); + if (unlikely(rc)) { + uk_pr_err("Could not map device 
(%d)\n", rc); + return rc; + } +#endif /* CONFIG_PAGING */ + uk_pr_info("Found GICv2 on:\n"); uk_pr_info("\tDistributor : 0x%lx - 0x%lx\n", gicv2_drv.dist_mem_addr, diff --git a/drivers/ukintctlr/gic/gic-v3.c b/drivers/ukintctlr/gic/gic-v3.c index 1168b963..d5a8a93a 100644 --- a/drivers/ukintctlr/gic/gic-v3.c +++ b/drivers/ukintctlr/gic/gic-v3.c @@ -56,6 +56,11 @@ #include #include +#if CONFIG_PAGING +#include +#include +#endif /* CONFIG_PAGING */ + #define GIC_MAX_IRQ UK_INTCTLR_MAX_IRQ #define GIC_RDIST_REG(gdev, r) \ @@ -794,6 +799,33 @@ static int gicv3_do_probe(void) } #endif /* !CONFIG_UKPLAT_ACPI */ +#if CONFIG_PAGING +static int gicv3_map(void) +{ + __vaddr_t vbase; + + vbase = uk_bus_pf_devmap(gicv3_drv.dist_mem_addr, + gicv3_drv.dist_mem_size); + if (unlikely(PTRISERR(vbase))) { + uk_pr_err("Could not map GIC dist (%d)\n", PTR2ERR(vbase)); + return PTR2ERR(vbase); + } + + gicv3_drv.dist_mem_addr = vbase; + + vbase = uk_bus_pf_devmap(gicv3_drv.rdist_mem_addr, + gicv3_drv.rdist_mem_size); + if (unlikely(PTRISERR(vbase))) { + uk_pr_err("Could not map GIC rdist (%d)\n", PTR2ERR(vbase)); + return PTR2ERR(vbase); + } + + gicv3_drv.rdist_mem_addr = vbase; + + return 0; +} +#endif /* CONFIG_PAGING */ + /** * Probe device tree for GICv3 * NOTE: First time must not be called from multiple CPUs in parallel @@ -827,6 +859,14 @@ int gicv3_probe(struct _gic_dev **dev) return rc; } +#if CONFIG_PAGING + rc = gicv3_map(); + if (unlikely(rc)) { + uk_pr_err("Could not map device (%d)\n", rc); + return rc; + } +#endif /* CONFIG_PAGING */ + uk_pr_info("Found GICv3 on:\n"); uk_pr_info("\tDistributor : 0x%lx - 0x%lx\n", gicv3_drv.dist_mem_addr, gicv3_drv.dist_mem_addr + gicv3_drv.dist_mem_size - 1); -- Gitee From 2fe053f8b35dcca39db8a55bd8a016f3b1c74ce0 Mon Sep 17 00:00:00 2001 From: Michalis Pappas Date: Wed, 15 May 2024 11:49:54 +0200 Subject: [PATCH 13/18] drivers/ukrtc/pl031: Map pl031 regions at runtime Map pl031 regions dynamically if paging is enabled. 
This is now required as paged memory init unmaps any memory not backed by an mrd. Signed-off-by: Michalis Pappas Reviewed-by: Sergiu Moga Reviewed-by: Serban Sorohan Approved-by: Razvan Deaconescu GitHub-Closes: #1373 --- plat/drivers/rtc/pl031.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/plat/drivers/rtc/pl031.c b/plat/drivers/rtc/pl031.c index 12a1b223..3472823b 100644 --- a/plat/drivers/rtc/pl031.c +++ b/plat/drivers/rtc/pl031.c @@ -41,6 +41,10 @@ #include #include +#if CONFIG_PAGING +#include +#endif /* CONFIG_PAGING */ + static __u64 pl031_base_addr; static int pl031_irq; @@ -177,6 +181,12 @@ int pl031_init_rtc(void *dtb) } uk_pr_info("Found RTC at: 0x%lx\n", pl031_base_addr); +#if CONFIG_PAGING + pl031_base_addr = uk_bus_pf_devmap(pl031_base_addr, size); + if (unlikely(PTRISERR(pl031_base_addr))) + return PTR2ERR(pl031_base_addr); +#endif /* CONFIG_PAGING */ + rc = uk_intctlr_irq_fdt_xlat(dtb, offs, 0, &irq); if (unlikely(rc)) return rc; -- Gitee From 3e8c96c29ceff5ccdfd56a9776360132e2d7d79e Mon Sep 17 00:00:00 2001 From: wangguokun Date: Tue, 19 Nov 2024 16:08:44 +0800 Subject: [PATCH 14/18] lib/uktty: fix the header file warning for pl031 Signed-off-by: wangguokun --- plat/drivers/rtc/pl031.c | 1 + 1 file changed, 1 insertion(+) diff --git a/plat/drivers/rtc/pl031.c b/plat/drivers/rtc/pl031.c index 3472823b..2dbb2957 100644 --- a/plat/drivers/rtc/pl031.c +++ b/plat/drivers/rtc/pl031.c @@ -43,6 +43,7 @@ #if CONFIG_PAGING #include +#include #endif /* CONFIG_PAGING */ static __u64 pl031_base_addr; -- Gitee From 27ab61ed2c5feda0e28ad68f85936c286be0ae55 Mon Sep 17 00:00:00 2001 From: wangguokun Date: Tue, 19 Nov 2024 16:20:18 +0800 Subject: [PATCH 15/18] lib/uktty: map ns8250 region at runtime Insert the region of ns8250 when CONFIG_PAGING is enabled. This is now required as paged memory init unmaps any memory not backed by an mrd. 
Signed-off-by: wangguokun --- drivers/uktty/ns8250/Config.uk | 1 + drivers/uktty/ns8250/Makefile.uk | 1 + drivers/uktty/ns8250/ns8250.c | 37 +++++++++++++++++++++++++++++++- 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/drivers/uktty/ns8250/Config.uk b/drivers/uktty/ns8250/Config.uk index 3b2b1a44..ad134eed 100644 --- a/drivers/uktty/ns8250/Config.uk +++ b/drivers/uktty/ns8250/Config.uk @@ -1,6 +1,7 @@ menuconfig LIBTNTTY_NS8250 bool "NS8250" default n + select LIBUKBUS_PLATFORM if CONFIG_PAGING depends on ARCH_ARM_64 if LIBTNTTY_NS8250 diff --git a/drivers/uktty/ns8250/Makefile.uk b/drivers/uktty/ns8250/Makefile.uk index 0919e780..f61357ee 100644 --- a/drivers/uktty/ns8250/Makefile.uk +++ b/drivers/uktty/ns8250/Makefile.uk @@ -1,4 +1,5 @@ $(eval $(call addlib_s,libuktty_ns8250,$(CONFIG_LIBTNTTY_NS8250))) CINCLUDES-y += -I$(LIBUKTTY_NS8250_BASE)/include +LIBUKTTY_NS8250_CINCLUDES-y += -I$(UK_PLAT_COMMON_BASE)/include LIBUKTTY_NS8250_SRCS-y += $(LIBUKTTY_NS8250_BASE)/ns8250.c diff --git a/drivers/uktty/ns8250/ns8250.c b/drivers/uktty/ns8250/ns8250.c index e5eae492..5bf7a644 100644 --- a/drivers/uktty/ns8250/ns8250.c +++ b/drivers/uktty/ns8250/ns8250.c @@ -19,6 +19,10 @@ #include #include +#if CONFIG_PAGING +#include +#endif /* CONFIG_PAGING */ + #define NS8250_THR_OFFSET 0x00U #define NS8250_RBR_OFFSET 0x00U #define NS8250_DLL_OFFSET 0x00U @@ -73,6 +77,34 @@ static __u32 ns8250_mode_x_div = 16; /* Macros to extract int shift/width infos */ #define EXTRACT_HIGH_BITS(x) ((__u32)((x) & 0xFFFFFFFF) >> 24) +#if CONFIG_PAGING +static void ns8250_region_devmap(__paddr_t base, __sz size) +{ + struct ukplat_memregion_desc mrd = { 0 }; + struct ukplat_bootinfo *bi; + int rc; + + UK_ASSERT(size > 0); + UK_ASSERT(base <= __U64_MAX - size); + + bi = ukplat_bootinfo_get(); + if (unlikely(!bi)) + UK_CRASH("Invalid bootinfo"); + + mrd.pbase = PAGE_ALIGN_DOWN(base); + mrd.vbase = PAGE_ALIGN_DOWN(base); + mrd.pg_off = base - mrd.pbase; + mrd.len = size; + mrd.pg_count 
= PAGE_COUNT(mrd.pg_off + mrd.len); + mrd.flags = UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE; + mrd.type = UKPLAT_MEMRT_DEVICE; + + rc = ukplat_memregion_list_insert(&bi->mrds, &mrd); + if (unlikely(rc < 0)) + UK_CRASH("Could not insert mrd\n"); +} +#endif /* CONFIG_PAGING */ + static inline __u32 ns8250_reg_read(__u32 reg) { __u32 ret; @@ -167,8 +199,11 @@ void ns8250_console_init(const void *dtb) if (val < 0) UK_CRASH("Could not find proper address!\n"); - reg_uart_base = naddr; +#if defined(CONFIG_PAGING) + ns8250_region_devmap(naddr, nsize); +#endif /* CONFIG_PAGING */ + reg_uart_base = naddr; regs = fdt_getprop(dtb, offset, "reg-shift", &len); if (regs) ns8250_reg_shift = EXTRACT_HIGH_BITS(*regs); -- Gitee From db35ad9df8964bd46d8286cdfa916089b4ebae08 Mon Sep 17 00:00:00 2001 From: wangguokun Date: Fri, 22 Nov 2024 10:51:45 +0800 Subject: [PATCH 16/18] plat/common: ukplat_memregion_alloc retrieves address via parameter When ukplat_memregion_alloc returns a void * type, if it allocates a memory region (memregion) starting at address 0, it may be mistakenly interpreted as a failed allocation due to the overlap in meaning between address 0 and NULL. Signed-off-by: wangguokun --- include/uk/plat/memory.h | 6 ++-- plat/common/bootinfo_fdt.c | 22 +++++++------ plat/common/include/uk/plat/common/memory.h | 10 +++--- plat/common/memory.c | 16 +++++----- plat/kvm/arm/setup.c | 35 +++++++++++---------- plat/kvm/x86/setup.c | 32 ++++++++++--------- 6 files changed, 65 insertions(+), 56 deletions(-) diff --git a/include/uk/plat/memory.h b/include/uk/plat/memory.h index 2cf7f851..87be409d 100644 --- a/include/uk/plat/memory.h +++ b/include/uk/plat/memory.h @@ -361,11 +361,13 @@ struct uk_alloc *ukplat_memallocator_get(void); * Memory region type to use for the allocated memory. Can be 0. * @param flags * Flags of the allocated memory region. + * @param raddr + * A pointer to the allocated memory on success. 
* * @return - * A pointer to the allocated memory on success, NULL otherwise. + * 0 on success, not 0 otherwise. */ -void *ukplat_memregion_alloc(__sz size, int type, __u16 flags); +int ukplat_memregion_alloc(__sz size, int type, __u16 flags, __paddr_t *raddr); /** * Initializes the memory mapping based on the platform or architecture defined diff --git a/plat/common/bootinfo_fdt.c b/plat/common/bootinfo_fdt.c index 17974391..1216fbf9 100644 --- a/plat/common/bootinfo_fdt.c +++ b/plat/common/bootinfo_fdt.c @@ -115,7 +115,9 @@ static void fdt_bootinfo_cmdl_mrd(struct ukplat_bootinfo *bi, void *fdtp) int fdt_cmdl_len; __sz cmdl_len; int nchosen; - char *cmdl; + __u64 cmdl; + char *cmdl_ptr; + int rc; nchosen = fdt_path_offset(fdtp, "/chosen"); if (unlikely(nchosen < 0)) @@ -125,20 +127,20 @@ static void fdt_bootinfo_cmdl_mrd(struct ukplat_bootinfo *bi, void *fdtp) if (unlikely(!fdt_cmdl || fdt_cmdl_len <= 0)) return; - cmdl = ukplat_memregion_alloc(fdt_cmdl_len + sizeof(CONFIG_UK_NAME) + 1, - UKPLAT_MEMRT_CMDLINE, - UKPLAT_MEMRF_READ); - if (unlikely(!cmdl)) + rc = ukplat_memregion_alloc(fdt_cmdl_len + sizeof(CONFIG_UK_NAME) + 1, + UKPLAT_MEMRT_CMDLINE, UKPLAT_MEMRF_READ, &cmdl); + if (unlikely(rc)) ukplat_bootinfo_crash("Command-line alloc failed\n"); cmdl_len = sizeof(CONFIG_UK_NAME); - strncpy(cmdl, CONFIG_UK_NAME, cmdl_len); - cmdl[cmdl_len - 1] = ' '; - strncpy(cmdl + cmdl_len, fdt_cmdl, fdt_cmdl_len); + cmdl_ptr = (char *)cmdl; + strncpy(cmdl_ptr, CONFIG_UK_NAME, cmdl_len); + cmdl_ptr[cmdl_len - 1] = ' '; + strncpy(cmdl_ptr + cmdl_len, fdt_cmdl, fdt_cmdl_len); cmdl_len += fdt_cmdl_len; - cmdl[cmdl_len] = '\0'; + cmdl_ptr[cmdl_len] = '\0'; - bi->cmdline = (__u64)cmdl; + bi->cmdline = cmdl; bi->cmdline_len = (__u64)cmdl_len; } diff --git a/plat/common/include/uk/plat/common/memory.h b/plat/common/include/uk/plat/common/memory.h index c023c9a8..ac64e6c9 100644 --- a/plat/common/include/uk/plat/common/memory.h +++ b/plat/common/include/uk/plat/common/memory.h @@ 
-229,14 +229,14 @@ static inline int ukplat_memregion_alloc_sipi_vect(void) { __sz len; + int rc; len = (__sz)((__uptr)x86_start16_end - (__uptr)x86_start16_begin); len = PAGE_ALIGN_UP(len); - x86_start16_addr = (__uptr)ukplat_memregion_alloc(len, - UKPLAT_MEMRT_RESERVED, - UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE); - if (unlikely(!x86_start16_addr || x86_start16_addr >= X86_HI_MEM_START)) + rc = ukplat_memregion_alloc(len, UKPLAT_MEMRT_RESERVED, + UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE, + &x86_start16_addr); + if (unlikely(rc || x86_start16_addr >= X86_HI_MEM_START)) return -ENOMEM; return 0; diff --git a/plat/common/memory.c b/plat/common/memory.c index 65a984a4..0f875d63 100644 --- a/plat/common/memory.c +++ b/plat/common/memory.c @@ -65,7 +65,7 @@ struct uk_alloc *ukplat_memallocator_get(void) return plat_allocator; } -void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) +int ukplat_memregion_alloc(__sz size, int type, __u16 flags, __paddr_t *raddr) { struct ukplat_memregion_desc *mrd, alloc_mrd = {0}; struct ukplat_bootinfo *bi; @@ -86,7 +86,7 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) if ((mrd->flags & UKPLAT_MEMRF_PERMS) != (UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE)) - return NULL; + return -1; ostart = mrd->pbase; olen = mrd->len; @@ -103,7 +103,8 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) mrd->type = type; mrd->flags = flags; - return (void *)pstart; + *raddr = pstart; + return 0; } /* Adjust free region */ @@ -124,7 +125,7 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) bi = ukplat_bootinfo_get(); if (unlikely(!bi)) - return NULL; + return -1; rc = ukplat_memregion_list_insert(&bi->mrds, &alloc_mrd); if (unlikely(rc < 0)) { @@ -132,13 +133,14 @@ void *ukplat_memregion_alloc(__sz size, int type, __u16 flags) mrd->vbase = ostart; mrd->len = olen; - return NULL; + return -1; } - return (void *)pstart; + *raddr = pstart; + return 0; } - return NULL; + return -1; } /* We want a criteria 
based on which we decide which memory region to keep, diff --git a/plat/kvm/arm/setup.c b/plat/kvm/arm/setup.c index 76ec95f4..b057c3d3 100644 --- a/plat/kvm/arm/setup.c +++ b/plat/kvm/arm/setup.c @@ -119,12 +119,13 @@ enomethod: } #endif -static char *cmdline; +static __u64 cmdline; static __sz cmdline_len; static inline int cmdline_init(struct ukplat_bootinfo *bi) { char *cmdl; + int rc; if (bi->cmdline_len) { cmdl = (char *)bi->cmdline; @@ -138,28 +139,29 @@ static inline int cmdline_init(struct ukplat_bootinfo *bi) * by `ukplat_entry_argp` to obtain argc/argv. So mark it as a kernel * resource instead. */ - cmdline = ukplat_memregion_alloc(cmdline_len + 1, UKPLAT_MEMRT_KERNEL, - UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE); - if (unlikely(!cmdline)) + rc = ukplat_memregion_alloc(cmdline_len + 1, UKPLAT_MEMRT_KERNEL, + UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE, + &cmdline); + if (unlikely(rc)) return -ENOMEM; - memcpy(cmdline, cmdl, cmdline_len); - cmdline[cmdline_len] = 0; + memcpy((void *)cmdline, cmdl, cmdline_len); + cmdl = (char *)cmdline; + cmdl[cmdline_len] = 0; return 0; } static void __noreturn _ukplat_entry2(void) { - ukplat_entry_argp(NULL, cmdline, cmdline_len); + ukplat_entry_argp(NULL, (char *)cmdline, cmdline_len); ukplat_lcpu_halt(); } void __no_pauth _ukplat_entry(struct ukplat_bootinfo *bi) { - void *bstack; + __u64 bstack; void *fdt; int rc; @@ -171,13 +173,12 @@ void __no_pauth _ukplat_entry(struct ukplat_bootinfo *bi) UK_CRASH("Failed to initialize command-line\n"); /* Allocate boot stack */ - bstack = ukplat_memregion_alloc(__STACK_SIZE, UKPLAT_MEMRT_STACK, - UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE); - if (unlikely(!bstack)) + rc = ukplat_memregion_alloc(__STACK_SIZE, UKPLAT_MEMRT_STACK, + UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE, + &bstack); + if (unlikely(rc)) UK_CRASH("Boot stack alloc failed\n"); - bstack = (void *)((__uptr)bstack + __STACK_SIZE); - + bstack = (__u64)((__uptr)bstack + __STACK_SIZE); /* Initialize paging */ rc = 
ukplat_mem_init(); @@ -237,7 +238,7 @@ void __no_pauth _ukplat_entry(struct ukplat_bootinfo *bi) /* * Switch away from the bootstrap stack as early as possible. */ - uk_pr_info("Switch from bootstrap stack to stack @%p\n", bstack); + uk_pr_info("Switch from bootstrap stack to stack @%lx\n", bstack); - lcpu_arch_jump_to(bstack, _ukplat_entry2); + lcpu_arch_jump_to((void *)bstack, _ukplat_entry2); } diff --git a/plat/kvm/x86/setup.c b/plat/kvm/x86/setup.c index ade0d1d2..9303544b 100644 --- a/plat/kvm/x86/setup.c +++ b/plat/kvm/x86/setup.c @@ -24,12 +24,13 @@ #include #include -static char *cmdline; +static __u64 cmdline; static __sz cmdline_len; static inline int cmdline_init(struct ukplat_bootinfo *bi) { char *cmdl; + int rc; if (bi->cmdline_len) { cmdl = (char *)bi->cmdline; @@ -43,14 +44,15 @@ static inline int cmdline_init(struct ukplat_bootinfo *bi) * by `ukplat_entry_argp` to obtain argc/argv. So mark it as a kernel * resource instead. */ - cmdline = ukplat_memregion_alloc(cmdline_len + 1, UKPLAT_MEMRT_KERNEL, - UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE); - if (unlikely(!cmdline)) + rc = ukplat_memregion_alloc(cmdline_len + 1, UKPLAT_MEMRT_KERNEL, + UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE, + &cmdline); + if (unlikely(rc)) return -ENOMEM; - memcpy(cmdline, cmdl, cmdline_len); - cmdline[cmdline_len] = 0; + memcpy((void *)cmdline, cmdl, cmdline_len); + cmdl = (char *)cmdline; + cmdl[cmdline_len] = 0; return 0; } @@ -64,7 +66,7 @@ static void __noreturn _ukplat_entry2(void) */ ukarch_cfi_unwind_end(); - ukplat_entry_argp(NULL, cmdline, cmdline_len); + ukplat_entry_argp(NULL, (char *)cmdline, cmdline_len); ukplat_lcpu_halt(); } @@ -72,7 +74,7 @@ static void __noreturn _ukplat_entry2(void) void _ukplat_entry(struct lcpu *lcpu, struct ukplat_bootinfo *bi) { int rc; - void *bstack; + __u64 bstack; _libkvmplat_init_console(); @@ -96,12 +98,12 @@ void _ukplat_entry(struct lcpu *lcpu, struct ukplat_bootinfo *bi) /* Allocate boot stack */ - bstack =
ukplat_memregion_alloc(__STACK_SIZE, UKPLAT_MEMRT_STACK, - UKPLAT_MEMRF_READ | - UKPLAT_MEMRF_WRITE); - if (unlikely(!bstack)) + rc = ukplat_memregion_alloc(__STACK_SIZE, UKPLAT_MEMRT_STACK, + UKPLAT_MEMRF_READ | UKPLAT_MEMRF_WRITE, + &bstack); + if (unlikely(rc)) UK_CRASH("Boot stack alloc failed\n"); - bstack = (void *)((__uptr)bstack + __STACK_SIZE); + bstack = (__u64)((__uptr)bstack + __STACK_SIZE); /* Initialize memory */ rc = ukplat_mem_init(); @@ -133,6 +135,6 @@ void _ukplat_entry(struct lcpu *lcpu, struct ukplat_bootinfo *bi) #endif /* CONFIG_HAVE_X86PKU */ /* Switch away from the bootstrap stack */ - uk_pr_info("Switch from bootstrap stack to stack @%p\n", bstack); - lcpu_arch_jump_to(bstack, _ukplat_entry2); + uk_pr_info("Switch from bootstrap stack to stack @%lx\n", bstack); + lcpu_arch_jump_to((void *)bstack, _ukplat_entry2); } -- Gitee From 24cc74b92e848c638c70dfc4fe7c807a5f6be071 Mon Sep 17 00:00:00 2001 From: wangguokun Date: Fri, 22 Nov 2024 15:06:49 +0800 Subject: [PATCH 17/18] lib/ukintctlr: Map bcm2835/bcm2836 region at runtime Map bcm2835/bcm2836 regions dynamically if CONFIG_PAGING is enabled. This is now required as paged memory init unmaps any memory not backed by an mrd.
Signed-off-by: wangguokun --- drivers/ukintctlr/bcm_intc/bcm2835_intc.c | 12 ++++++++++++ drivers/ukintctlr/bcm_intc/bcm2836_intc.c | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/drivers/ukintctlr/bcm_intc/bcm2835_intc.c b/drivers/ukintctlr/bcm_intc/bcm2835_intc.c index 0580e424..e010cba2 100644 --- a/drivers/ukintctlr/bcm_intc/bcm2835_intc.c +++ b/drivers/ukintctlr/bcm_intc/bcm2835_intc.c @@ -22,6 +22,11 @@ #include #include +#if CONFIG_PAGING +#include +#include +#endif /* CONFIG_PAGING */ + static const char *const bcm2835_intc_device_list[] __maybe_unused = { /* 此处bcm2836-armctrl-ic沿用2835的中断控制器设计 */ "brcm,bcm2836-armctrl-ic", @@ -220,6 +225,13 @@ int bcm2835_intc_set_info(void *fdt) return ret; } +#if defined(CONFIG_PAGING) + bcm2835_intc_drv.mem_addr = + uk_bus_pf_devmap(bcm2835_intc_drv.mem_addr, bcm2835_intc_drv.mem_size); + if (unlikely(PTRISERR(bcm2835_intc_drv.mem_addr))) + uk_pr_err("Could not map bcm2835\n"); +#endif + bcm2835_intc_set_ops(); uk_pr_info("Found bcm2835_intc on:\n"); diff --git a/drivers/ukintctlr/bcm_intc/bcm2836_intc.c b/drivers/ukintctlr/bcm_intc/bcm2836_intc.c index 12b89d5a..ffe45764 100644 --- a/drivers/ukintctlr/bcm_intc/bcm2836_intc.c +++ b/drivers/ukintctlr/bcm_intc/bcm2836_intc.c @@ -25,6 +25,11 @@ #include #include +#if CONFIG_PAGING +#include +#include +#endif /* CONFIG_PAGING */ + int fdt_bcm2836_intc_offset; static const char *const bcm2836_intc_device_list[] __maybe_unused = { @@ -305,6 +310,13 @@ int bcm2836_intc_do_probe(struct bcm_intc_dev **dev) return ret; } +#if defined(CONFIG_PAGING) + bcm2836_intc_drv.mem_addr = + uk_bus_pf_devmap(bcm2836_intc_drv.mem_addr, bcm2836_intc_drv.mem_size); + if (unlikely(PTRISERR(bcm2836_intc_drv.mem_addr))) + uk_pr_err("Could not map bcm2836\n"); +#endif + bcm2836_intc_set_ops(); *dev = &bcm2836_intc_drv; -- Gitee From 1cdd8740612a52a6e6f181a824d567a76da9dfa4 Mon Sep 17 00:00:00 2001 From: wangguokun Date: Tue, 26 Nov 2024 18:32:11 +0800 Subject: [PATCH 18/18] 
plat/common: init page table mapping for UKPLAT_MEMRT_DEVICE memregion ukplat_paging_init does not specifically handle UKPLAT_MEMRT_DEVICE type memregion, which results in incorrect memory attribute settings for devices during early initialization, rendering the devices non-functional. Signed-off-by: wangguokun --- plat/common/paging.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/plat/common/paging.c b/plat/common/paging.c index 57b47754..53d3eadf 100644 --- a/plat/common/paging.c +++ b/plat/common/paging.c @@ -1520,6 +1520,19 @@ int ukplat_paging_init(void) UK_ASSERT_VALID_MRD(mrd); + if (mrd->type == UKPLAT_MEMRT_DEVICE) { + prot = PAGE_ATTR_PROT_RW; +#ifdef CONFIG_ARCH_ARM_64 + prot |= PAGE_ATTR_TYPE_DEVICE_nGnRnE; +#endif /* CONFIG_ARCH_ARM_64 */ + rc = ukplat_page_map(&kernel_pt, mrd->vbase, mrd->pbase, + mrd->pg_count, prot, 0); + if (unlikely(rc)) + return rc; + + continue; + } + prot = bootinfo_to_page_attr(mrd->flags); rc = ukplat_page_map(&kernel_pt, mrd->vbase, mrd->pbase, mrd->pg_count, -- Gitee