Diffstat (limited to 'meta-oe')
-rw-r--r--  meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0001-fixes_for_mm_struct.patch            |  176
-rw-r--r--  meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0002-fixes_for_module_memory.patch        |   65
-rw-r--r--  meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0003-fixes_for_changes_in_cpu_tlbstate.patch |   39
-rw-r--r--  meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/021-linux-5-8.patch                       | 5046
-rw-r--r--  meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/kernel-5.8-4.patch                        |   19
-rw-r--r--  meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers_6.1.12.bb                                 |    5
6 files changed, 5047 insertions, 303 deletions
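
Background for the diff below: Linux 5.8 renamed mm_struct's mmap_sem to mmap_lock (among other kernel API changes the patches adapt to). The three removed backports open-coded LINUX_VERSION_CODE checks at every lock site; the new squashed 021-linux-5-8.patch instead hides the rename behind wrapper macros. A minimal sketch of that wrapper approach (illustrative only; the LNX_MM_* macros are quoted from the patch, while the lnx_mmap_lock_demo() helper is invented here):

    #include <linux/version.h>
    #include <linux/mm.h>
    #include <linux/rwsem.h>

    /* Hide the 5.8 mmap_sem -> mmap_lock rename behind macros (as the patch below does). */
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
    # define LNX_MM_DOWN_WRITE(a_pMm)  down_write(&(a_pMm)->mmap_lock)
    # define LNX_MM_UP_WRITE(a_pMm)    up_write(&(a_pMm)->mmap_lock)
    #else
    # define LNX_MM_DOWN_WRITE(a_pMm)  down_write(&(a_pMm)->mmap_sem)
    # define LNX_MM_UP_WRITE(a_pMm)    up_write(&(a_pMm)->mmap_sem)
    #endif

    /* Hypothetical caller: take the write side of the mmap lock around an mm operation. */
    static void lnx_mmap_lock_demo(struct mm_struct *mm)
    {
        LNX_MM_DOWN_WRITE(mm);
        /* ... modify the task's VMAs here ... */
        LNX_MM_UP_WRITE(mm);
    }
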
diff --git a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0001-fixes_for_mm_struct.patch b/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0001-fixes_for_mm_struct.patch
deleted file mode 100644
index 1ad5ce51bf..0000000000
--- a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0001-fixes_for_mm_struct.patch
+++ /dev/null
@@ -1,176 +0,0 @@
-From 98070c936931879d2b8e22939724b5a0689721d0 Mon Sep 17 00:00:00 2001
-From: Hongxu Jia <hongxu.jia@windriver.com>
-Date: Tue, 18 Aug 2020 17:48:29 +0800
-Subject: [PATCH 1/3] fixes_for_mm_struct
-
-Upstream-Status: Backport [https://www.virtualbox.org/ticket/19644]
-
-Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
----
- .../Runtime/r0drv/linux/memobj-r0drv-linux.c | 74 +++++++++++++++++--
- 1 file changed, 67 insertions(+), 7 deletions(-)
-
-diff --git a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
-index 37389bcc..cdc7e8e6 100644
---- a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
-+++ b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
-@@ -222,9 +222,17 @@ static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignm
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
- ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
- #else
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- down_write(&pTask->mm->mmap_sem);
-+#else
-+ down_write(&pTask->mm->mmap_lock);
-+#endif
- ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- up_write(&pTask->mm->mmap_sem);
-+#else
-+ up_write(&pTask->mm->mmap_lock);
-+#endif
- #endif
- }
- else
-@@ -232,9 +240,17 @@ static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignm
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
- ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
- #else
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- down_write(&pTask->mm->mmap_sem);
-+#else
-+ down_write(&pTask->mm->mmap_lock);
-+#endif
- ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- up_write(&pTask->mm->mmap_sem);
-+#else
-+ up_write(&pTask->mm->mmap_lock);
-+#endif
- #endif
- if ( !(ulAddr & ~PAGE_MASK)
- && (ulAddr & (uAlignment - 1)))
-@@ -269,13 +285,29 @@ static void rtR0MemObjLinuxDoMunmap(void *pv, size_t cb, struct task_struct *pTa
- Assert(pTask == current); RT_NOREF_PV(pTask);
- vm_munmap((unsigned long)pv, cb);
- #elif defined(USE_RHEL4_MUNMAP)
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- down_write(&pTask->mm->mmap_sem);
-+#else
-+ down_write(&pTask->mm->mmap_lock);
-+#endif
- do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* should it be 1 or 0? */
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- up_write(&pTask->mm->mmap_sem);
- #else
-+ up_write(&pTask->mm->mmap_lock);
-+#endif
-+#else
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- down_write(&pTask->mm->mmap_sem);
-+#else
-+ down_write(&pTask->mm->mmap_lock);
-+#endif
- do_munmap(pTask->mm, (unsigned long)pv, cb);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- up_write(&pTask->mm->mmap_sem);
-+#else
-+ up_write(&pTask->mm->mmap_lock);
-+#endif
- #endif
- }
-
-@@ -593,7 +625,11 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
- size_t iPage;
- Assert(pTask);
- if (pTask && pTask->mm)
-- down_read(&pTask->mm->mmap_sem);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-+ down_read(&pTask->mm->mmap_sem);
-+#else
-+ down_read(&pTask->mm->mmap_lock);
-+#endif
-
- iPage = pMemLnx->cPages;
- while (iPage-- > 0)
-@@ -608,7 +644,11 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
- }
-
- if (pTask && pTask->mm)
-- up_read(&pTask->mm->mmap_sem);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-+ up_read(&pTask->mm->mmap_sem);
-+#else
-+ up_read(&pTask->mm->mmap_lock);
-+#endif
- }
- /* else: kernel memory - nothing to do here. */
- break;
-@@ -1076,7 +1116,11 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3P
- papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
- if (papVMAs)
- {
-- down_read(&pTask->mm->mmap_sem);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-+ down_read(&pTask->mm->mmap_sem);
-+#else
-+ down_read(&pTask->mm->mmap_lock);
-+#endif
-
- /*
- * Get user pages.
-@@ -1162,7 +1206,11 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3P
- papVMAs[rc]->vm_flags |= VM_DONTCOPY | VM_LOCKED;
- }
-
-- up_read(&pTask->mm->mmap_sem);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-+ up_read(&pTask->mm->mmap_sem);
-+#else
-+ up_read(&pTask->mm->mmap_lock);
-+#endif
-
- RTMemFree(papVMAs);
-
-@@ -1189,7 +1237,11 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3P
- #endif
- }
-
-- up_read(&pTask->mm->mmap_sem);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-+ up_read(&pTask->mm->mmap_sem);
-+#else
-+ up_read(&pTask->mm->mmap_lock);
-+#endif
-
- RTMemFree(papVMAs);
- rc = VERR_LOCK_FAILED;
-@@ -1604,7 +1656,11 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ p
- const size_t cPages = (offSub + cbSub) >> PAGE_SHIFT;
- size_t iPage;
-
-- down_write(&pTask->mm->mmap_sem);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-+ down_write(&pTask->mm->mmap_sem);
-+#else
-+ down_write(&pTask->mm->mmap_lock);
-+#endif
-
- rc = VINF_SUCCESS;
- if (pMemLnxToMap->cPages)
-@@ -1721,7 +1777,11 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ p
- }
- #endif /* CONFIG_NUMA_BALANCING */
-
-- up_write(&pTask->mm->mmap_sem);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-+ up_write(&pTask->mm->mmap_sem);
-+#else
-+ up_write(&pTask->mm->mmap_lock);
-+#endif
-
- if (RT_SUCCESS(rc))
- {
---
-2.18.2
-
diff --git a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0002-fixes_for_module_memory.patch b/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0002-fixes_for_module_memory.patch
deleted file mode 100644
index a3cfc3b370..0000000000
--- a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0002-fixes_for_module_memory.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From bb580f7b601e5395a2f8fcb2485387035273320f Mon Sep 17 00:00:00 2001
-From: Hongxu Jia <hongxu.jia@windriver.com>
-Date: Tue, 18 Aug 2020 17:49:34 +0800
-Subject: [PATCH 2/3] fixes_for_module_memory
-
-Upstream-Status: Backport [https://www.virtualbox.org/ticket/19644]
-
-Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
----
- .../Runtime/r0drv/linux/alloc-r0drv-linux.c | 18 ++++++++++++++++--
- 1 file changed, 16 insertions(+), 2 deletions(-)
-
-diff --git a/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
-index bbb8acc6..45cd34c7 100644
---- a/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
-+++ b/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
-@@ -153,6 +153,8 @@ RT_EXPORT_SYMBOL(RTR0MemExecDonate);
-
-
- #ifdef RTMEMALLOC_EXEC_VM_AREA
-+
-+
- /**
- * Allocate executable kernel memory in the module range.
- *
-@@ -168,7 +170,12 @@ static PRTMEMHDR rtR0MemAllocExecVmArea(size_t cb)
- struct vm_struct *pVmArea;
- size_t iPage;
-
-+# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
-+ pVmArea = __get_vm_area_caller(cbAlloc, VM_ALLOC, MODULES_VADDR, MODULES_END,
-+ __builtin_return_address(0));
-+#else
- pVmArea = __get_vm_area(cbAlloc, VM_ALLOC, MODULES_VADDR, MODULES_END);
-+#endif
- if (!pVmArea)
- return NULL;
- pVmArea->nr_pages = 0; /* paranoia? */
-@@ -201,14 +208,21 @@ static PRTMEMHDR rtR0MemAllocExecVmArea(size_t cb)
- # endif
- pVmArea->nr_pages = cPages;
- pVmArea->pages = papPages;
-- if (!map_vm_area(pVmArea, PAGE_KERNEL_EXEC,
-+# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
-+ unsigned long start = (unsigned long)pVmArea->addr;
-+ unsigned long size = get_vm_area_size(pVmArea);
-+
-+ if (!map_kernel_range(start, size, PAGE_KERNEL_EXEC, papPages))
-+#else
-+ if (!map_vm_area(pVmArea, PAGE_KERNEL_EXEC,
- # if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
- &papPagesIterator
- # else
- papPages
- # endif
- ))
-- {
-+#endif
-+ {
- PRTMEMLNXHDREX pHdrEx = (PRTMEMLNXHDREX)pVmArea->addr;
- pHdrEx->pVmArea = pVmArea;
- pHdrEx->pvDummy = NULL;
---
-2.18.2
-
diff --git a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0003-fixes_for_changes_in_cpu_tlbstate.patch b/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0003-fixes_for_changes_in_cpu_tlbstate.patch
deleted file mode 100644
index 6a3e63f63d..0000000000
--- a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/0003-fixes_for_changes_in_cpu_tlbstate.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From 6089974a81b1b44e1d2dfa5af1fdc110dfee40c1 Mon Sep 17 00:00:00 2001
-From: Hongxu Jia <hongxu.jia@windriver.com>
-Date: Tue, 18 Aug 2020 17:51:24 +0800
-Subject: [PATCH 3/3] fixes_for_changes_in_cpu_tlbstate
-
-Upstream-Status: Backport [https://www.virtualbox.org/ticket/19644]
-
-Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
----
- src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c | 7 +++++++
- 1 file changed, 7 insertions(+)
-
-diff --git a/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c b/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
-index c7d0d99a..2e7aa6e1 100644
---- a/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
-+++ b/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
-@@ -757,12 +757,19 @@ EXPORT_SYMBOL(SUPDrvLinuxIDC);
- RTCCUINTREG VBOXCALL supdrvOSChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
- {
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- RTCCUINTREG uOld = this_cpu_read(cpu_tlbstate.cr4);
-+#else
-+ RTCCUINTREG uOld = __read_cr4();
-+#endif
- RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
- if (uNew != uOld)
- {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
- this_cpu_write(cpu_tlbstate.cr4, uNew);
- __write_cr4(uNew);
-+#endif
-+ ASMSetCR4(uNew);
- }
- #else
- RTCCUINTREG uOld = ASMGetCR4();
---
-2.18.2
-
diff --git a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/021-linux-5-8.patch b/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/021-linux-5-8.patch
new file mode 100644
index 0000000000..9d45750608
--- /dev/null
+++ b/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/021-linux-5-8.patch
@@ -0,0 +1,5046 @@
+fix Linux 5.8
+
+This is a squashed patch with following upstream revisions:
+
+ r85208
+ r85430
+ r85431
+ r85432
+ r85447 # context required adjustment
+ r85453
+ r85460
+ r85461 # context required adjustment
+ r85500
+ r85501
+ r85503
+ r85504
+ r85505
+ r85506
+ r85507 # context required adjustment
+ r85509
+ r85510
+ r85511
+ r85514
+ r85516
+ r85517
+ r85518
+ r85525
+ r85526
+ r85527
+ r85533
+ r85534
+ r85540
+ r85541
+ r85545
+ r85546
+ r85552
+ r85555
+ r85556
+ r85590
+
+Thanks a lot to loqs for his hard work on FS#67488!
+
+--- a/src/VBox/Runtime/r0drv/linux/time-r0drv-linux.c
++++ b/src/VBox/Runtime/r0drv/linux/time-r0drv-linux.c
+@@ -31,6 +31,12 @@
+ #define LOG_GROUP RTLOGGROUP_TIME
+ #include "the-linux-kernel.h"
+ #include "internal/iprt.h"
++/* Make sure we have the setting functions we need for RTTimeNow: */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
++# define RTTIME_INCL_TIMEVAL
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
++# define RTTIME_INCL_TIMESPEC
++#endif
+ #include <iprt/time.h>
+ #include <iprt/asm.h>
+
+@@ -181,22 +187,19 @@ RT_EXPORT_SYMBOL(RTTimeSystemMilliTS);
+ RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
+ {
+ IPRT_LINUX_SAVE_EFL_AC();
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16)
+-/* On Linux 4.20, time.h includes time64.h and we have to use 64-bit times. */
+-# ifdef _LINUX_TIME64_H
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+ struct timespec64 Ts;
+- ktime_get_real_ts64(&Ts);
+-# else
+- struct timespec Ts;
+- ktime_get_real_ts(&Ts);
+-# endif
++ ktime_get_real_ts64(&Ts); /* ktime_get_real_ts64 was added as a macro in 3.17, function since 4.18. */
+ IPRT_LINUX_RESTORE_EFL_AC();
+-# ifdef _LINUX_TIME64_H
+ return RTTimeSpecSetTimespec64(pTime, &Ts);
+-# else
++
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16)
++ struct timespec Ts;
++ ktime_get_real_ts(&Ts); /* ktime_get_real_ts was removed in Linux 4.20. */
++ IPRT_LINUX_RESTORE_EFL_AC();
+ return RTTimeSpecSetTimespec(pTime, &Ts);
+-# endif
+-#else /* < 2.6.16 */
++
++#else /* < 2.6.16 */
+ struct timeval Tv;
+ do_gettimeofday(&Tv);
+ IPRT_LINUX_RESTORE_EFL_AC();
+--- a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
++++ b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+@@ -52,6 +52,14 @@
+ # define PAGE_READONLY_EXEC PAGE_READONLY
+ #endif
+
++/** @def IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
++ * Whether we use alloc_vm_area (3.2+) for executable memory.
++ * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
++ * better W^R compliance (fExecutable flag). */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) || defined(DOXYGEN_RUNNING)
++# define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
++#endif
++
+ /*
+ * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
+ * track_pfn_vma_new() is apparently not defined for non-RAM pages.
+@@ -72,12 +80,27 @@
+ # define gfp_t unsigned
+ #endif
+
++/*
++ * Wrappers around mmap_lock/mmap_sem difference.
++ */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++# define LNX_MM_DOWN_READ(a_pMm) down_read(&(a_pMm)->mmap_lock)
++# define LNX_MM_UP_READ(a_pMm) up_read(&(a_pMm)->mmap_lock)
++# define LNX_MM_DOWN_WRITE(a_pMm) down_write(&(a_pMm)->mmap_lock)
++# define LNX_MM_UP_WRITE(a_pMm) up_write(&(a_pMm)->mmap_lock)
++#else
++# define LNX_MM_DOWN_READ(a_pMm) down_read(&(a_pMm)->mmap_sem)
++# define LNX_MM_UP_READ(a_pMm) up_read(&(a_pMm)->mmap_sem)
++# define LNX_MM_DOWN_WRITE(a_pMm) down_write(&(a_pMm)->mmap_sem)
++# define LNX_MM_UP_WRITE(a_pMm) up_write(&(a_pMm)->mmap_sem)
++#endif
++
+
+ /*********************************************************************************************************************************
+ * Structures and Typedefs *
+ *********************************************************************************************************************************/
+ /**
+- * The Darwin version of the memory object structure.
++ * The Linux version of the memory object structure.
+ */
+ typedef struct RTR0MEMOBJLNX
+ {
+@@ -90,11 +113,20 @@ typedef struct RTR0MEMOBJLNX
+ bool fExecutable;
+ /** Set if we've vmap'ed the memory into ring-0. */
+ bool fMappedToRing0;
++#ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
++ /** Return from alloc_vm_area() that we now need to use for executable
++ * memory. */
++ struct vm_struct *pArea;
++ /** PTE array that goes along with pArea (must be freed). */
++ pte_t **papPtesForArea;
++#endif
+ /** The pages in the apPages array. */
+ size_t cPages;
+ /** Array of struct page pointers. (variable size) */
+ struct page *apPages[1];
+-} RTR0MEMOBJLNX, *PRTR0MEMOBJLNX;
++} RTR0MEMOBJLNX;
++/** Pointer to the linux memory object. */
++typedef RTR0MEMOBJLNX *PRTR0MEMOBJLNX;
+
+
+ static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx);
+@@ -182,7 +214,7 @@ static pgprot_t rtR0MemObjLinuxConvertPr
+ * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativerMapUser that creates
+ * an empty user space mapping.
+ *
+- * We acquire the mmap_sem of the task!
++ * We acquire the mmap_sem/mmap_lock of the task!
+ *
+ * @returns Pointer to the mapping.
+ * (void *)-1 on failure.
+@@ -222,9 +254,9 @@ static void *rtR0MemObjLinuxDoMmap(RTR3P
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+ ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
+ #else
+- down_write(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_WRITE(pTask->mm);
+ ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
+- up_write(&pTask->mm->mmap_sem);
++ LNX_MM_UP_WRITE(pTask->mm);
+ #endif
+ }
+ else
+@@ -232,9 +264,9 @@ static void *rtR0MemObjLinuxDoMmap(RTR3P
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+ ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
+ #else
+- down_write(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_WRITE(pTask->mm);
+ ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
+- up_write(&pTask->mm->mmap_sem);
++ LNX_MM_UP_WRITE(pTask->mm);
+ #endif
+ if ( !(ulAddr & ~PAGE_MASK)
+ && (ulAddr & (uAlignment - 1)))
+@@ -257,7 +289,7 @@ static void *rtR0MemObjLinuxDoMmap(RTR3P
+ * Worker that destroys a user space mapping.
+ * Undoes what rtR0MemObjLinuxDoMmap did.
+ *
+- * We acquire the mmap_sem of the task!
++ * We acquire the mmap_sem/mmap_lock of the task!
+ *
+ * @param pv The ring-3 mapping.
+ * @param cb The size of the mapping.
+@@ -269,13 +301,13 @@ static void rtR0MemObjLinuxDoMunmap(void
+ Assert(pTask == current); RT_NOREF_PV(pTask);
+ vm_munmap((unsigned long)pv, cb);
+ #elif defined(USE_RHEL4_MUNMAP)
+- down_write(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_WRITE(pTask->mm);
+ do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* should it be 1 or 0? */
+- up_write(&pTask->mm->mmap_sem);
++ LNX_MM_UP_WRITE(pTask->mm);
+ #else
+- down_write(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_WRITE(pTask->mm);
+ do_munmap(pTask->mm, (unsigned long)pv, cb);
+- up_write(&pTask->mm->mmap_sem);
++ LNX_MM_UP_WRITE(pTask->mm);
+ #endif
+ }
+
+@@ -520,15 +552,49 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMO
+ pgprot_val(fPg) |= _PAGE_NX;
+ # endif
+
++# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
++ if (fExecutable)
++ {
++ pte_t **papPtes = (pte_t **)kmalloc_array(pMemLnx->cPages, sizeof(papPtes[0]), GFP_KERNEL);
++ if (papPtes)
++ {
++ pMemLnx->pArea = alloc_vm_area(pMemLnx->Core.cb, papPtes); /* Note! pArea->nr_pages is not set. */
++ if (pMemLnx->pArea)
++ {
++ size_t i;
++ Assert(pMemLnx->pArea->size >= pMemLnx->Core.cb); /* Note! includes guard page. */
++ Assert(pMemLnx->pArea->addr);
++# ifdef _PAGE_NX
++ pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. */
++# endif
++ pMemLnx->papPtesForArea = papPtes;
++ for (i = 0; i < pMemLnx->cPages; i++)
++ *papPtes[i] = mk_pte(pMemLnx->apPages[i], fPg);
++ pMemLnx->Core.pv = pMemLnx->pArea->addr;
++ pMemLnx->fMappedToRing0 = true;
++ }
++ else
++ {
++ kfree(papPtes);
++ rc = VERR_MAP_FAILED;
++ }
++ }
++ else
++ rc = VERR_MAP_FAILED;
++ }
++ else
++# endif
++ {
+ # ifdef VM_MAP
+- pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
++ pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
+ # else
+- pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
++ pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
+ # endif
+- if (pMemLnx->Core.pv)
+- pMemLnx->fMappedToRing0 = true;
+- else
+- rc = VERR_MAP_FAILED;
++ if (pMemLnx->Core.pv)
++ pMemLnx->fMappedToRing0 = true;
++ else
++ rc = VERR_MAP_FAILED;
++ }
+ #else /* < 2.4.22 */
+ rc = VERR_NOT_SUPPORTED;
+ #endif
+@@ -554,6 +620,22 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMO
+ static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
+ {
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
++# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
++ if (pMemLnx->pArea)
++ {
++# if 0
++ pte_t **papPtes = pMemLnx->papPtesForArea;
++ size_t i;
++ for (i = 0; i < pMemLnx->cPages; i++)
++ *papPtes[i] = 0;
++# endif
++ free_vm_area(pMemLnx->pArea);
++ kfree(pMemLnx->papPtesForArea);
++ pMemLnx->pArea = NULL;
++ pMemLnx->papPtesForArea = NULL;
++ }
++ else
++# endif
+ if (pMemLnx->fMappedToRing0)
+ {
+ Assert(pMemLnx->Core.pv);
+@@ -593,7 +675,7 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR
+ size_t iPage;
+ Assert(pTask);
+ if (pTask && pTask->mm)
+- down_read(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_READ(pTask->mm);
+
+ iPage = pMemLnx->cPages;
+ while (iPage-- > 0)
+@@ -608,7 +690,7 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR
+ }
+
+ if (pTask && pTask->mm)
+- up_read(&pTask->mm->mmap_sem);
++ LNX_MM_UP_READ(pTask->mm);
+ }
+ /* else: kernel memory - nothing to do here. */
+ break;
+@@ -1076,7 +1158,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser
+ papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
+ if (papVMAs)
+ {
+- down_read(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_READ(pTask->mm);
+
+ /*
+ * Get user pages.
+@@ -1162,7 +1244,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser
+ papVMAs[rc]->vm_flags |= VM_DONTCOPY | VM_LOCKED;
+ }
+
+- up_read(&pTask->mm->mmap_sem);
++ LNX_MM_UP_READ(pTask->mm);
+
+ RTMemFree(papVMAs);
+
+@@ -1189,7 +1271,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser
+ #endif
+ }
+
+- up_read(&pTask->mm->mmap_sem);
++ LNX_MM_UP_READ(pTask->mm);
+
+ RTMemFree(papVMAs);
+ rc = VERR_LOCK_FAILED;
+@@ -1422,6 +1504,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapKerne
+ * Use vmap - 2.4.22 and later.
+ */
+ pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
++ /** @todo We don't really care too much for EXEC here... 5.8 always adds NX. */
+ Assert(((offSub + cbSub) >> PAGE_SHIFT) <= pMemLnxToMap->cPages);
+ # ifdef VM_MAP
+ pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[offSub >> PAGE_SHIFT], cbSub >> PAGE_SHIFT, VM_MAP, fPg);
+@@ -1604,7 +1687,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(
+ const size_t cPages = (offSub + cbSub) >> PAGE_SHIFT;
+ size_t iPage;
+
+- down_write(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_WRITE(pTask->mm);
+
+ rc = VINF_SUCCESS;
+ if (pMemLnxToMap->cPages)
+@@ -1721,7 +1804,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+
+- up_write(&pTask->mm->mmap_sem);
++ LNX_MM_UP_WRITE(pTask->mm);
+
+ if (RT_SUCCESS(rc))
+ {
+@@ -1753,6 +1836,29 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(
+
+ DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
+ {
++# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
++ /*
++ * Currently only supported when we've got addresses PTEs from the kernel.
++ */
++ PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
++ if (pMemLnx->pArea && pMemLnx->papPtesForArea)
++ {
++ pgprot_t const fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
++ size_t const cPages = (offSub + cbSub) >> PAGE_SHIFT;
++ pte_t **papPtes = pMemLnx->papPtesForArea;
++ size_t i;
++
++ for (i = offSub >> PAGE_SHIFT; i < cPages; i++)
++ {
++ set_pte(papPtes[i], mk_pte(pMemLnx->apPages[i], fPg));
++ }
++ preempt_disable();
++ __flush_tlb_all();
++ preempt_enable();
++ return VINF_SUCCESS;
++ }
++# endif
++
+ NOREF(pMem);
+ NOREF(offSub);
+ NOREF(cbSub);
+--- a/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
++++ b/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
+@@ -144,9 +144,9 @@ static int force_async_tsc = 0;
+ * Memory for the executable memory heap (in IPRT).
+ */
+ # ifdef DEBUG
+-# define EXEC_MEMORY_SIZE 8388608 /* 8 MB */
++# define EXEC_MEMORY_SIZE 10485760 /* 10 MB */
+ # else
+-# define EXEC_MEMORY_SIZE 2097152 /* 2 MB */
++# define EXEC_MEMORY_SIZE 8388608 /* 8 MB */
+ # endif
+ extern uint8_t g_abExecMemory[EXEC_MEMORY_SIZE];
+ # ifndef VBOX_WITH_TEXT_MODMEM_HACK
+@@ -756,20 +756,25 @@ EXPORT_SYMBOL(SUPDrvLinuxIDC);
+
+ RTCCUINTREG VBOXCALL supdrvOSChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
+ {
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
+- RTCCUINTREG uOld = this_cpu_read(cpu_tlbstate.cr4);
+- RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++ RTCCUINTREG const uOld = __read_cr4();
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
++ RTCCUINTREG const uOld = this_cpu_read(cpu_tlbstate.cr4);
++#else
++ RTCCUINTREG const uOld = ASMGetCR4();
++#endif
++ RTCCUINTREG const uNew = (uOld & fAndMask) | fOrMask;
+ if (uNew != uOld)
+ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++ ASMSetCR4(uNew);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
+ this_cpu_write(cpu_tlbstate.cr4, uNew);
+ __write_cr4(uNew);
+- }
+ #else
+- RTCCUINTREG uOld = ASMGetCR4();
+- RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
+- if (uNew != uOld)
+ ASMSetCR4(uNew);
+ #endif
++ }
+ return uOld;
+ }
+
+--- a/src/VBox/Additions/linux/sharedfolders/vfsmod.c
++++ b/src/VBox/Additions/linux/sharedfolders/vfsmod.c
+@@ -52,7 +52,7 @@
+ #endif
+ #include <linux/seq_file.h>
+ #include <linux/vfs.h>
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 62)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 62) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+ # include <linux/vermagic.h>
+ #endif
+ #include <VBox/err.h>
+--- a/Config.kmk
++++ b/Config.kmk
+@@ -4462,15 +4462,20 @@ endif # pe
+
+ ifeq ($(VBOX_LDR_FMT),elf)
+ TEMPLATE_VBoxR0_TOOL = $(VBOX_GCC_TOOL)
+-TEMPLATE_VBoxR0_CFLAGS = -fno-pie -nostdinc -g $(VBOX_GCC_pipe) $(VBOX_GCC_WERR) $(VBOX_GCC_PEDANTIC_C) $(VBOX_GCC_Wno-variadic-macros) $(VBOX_GCC_R0_OPT) $(VBOX_GCC_R0_FP) -fno-strict-aliasing -fno-exceptions $(VBOX_GCC_fno-stack-protector) -fno-common $(VBOX_GCC_fvisibility-hidden) -std=gnu99 $(VBOX_GCC_IPRT_FMT_CHECK)
+-TEMPLATE_VBoxR0_CXXFLAGS = -fno-pie -nostdinc -g $(VBOX_GCC_pipe) $(VBOX_GCC_WERR) $(VBOX_GCC_PEDANTIC_CXX) $(VBOX_GCC_Wno-variadic-macros) $(VBOX_GCC_R0_OPT) $(VBOX_GCC_R0_FP) -fno-strict-aliasing -fno-exceptions $(VBOX_GCC_fno-stack-protector) -fno-common $(VBOX_GCC_fvisibility-inlines-hidden) $(VBOX_GCC_fvisibility-hidden) -fno-rtti $(VBOX_GCC_IPRT_FMT_CHECK)
+-TEMPLATE_VBoxR0_CFLAGS.amd64 = -m64 -mno-red-zone -mcmodel=kernel -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -fasynchronous-unwind-tables -ffreestanding
+-TEMPLATE_VBoxR0_CXXFLAGS.amd64 = -m64 -mno-red-zone -mcmodel=kernel -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -fasynchronous-unwind-tables
++TEMPLATE_VBoxR0_CFLAGS = -fno-pie -nostdinc -g $(VBOX_GCC_pipe) $(VBOX_GCC_WERR) $(VBOX_GCC_PEDANTIC_C) \
++ $(VBOX_GCC_Wno-variadic-macros) $(VBOX_GCC_R0_OPT) $(VBOX_GCC_R0_FP) -fno-strict-aliasing -fno-exceptions \
++ $(VBOX_GCC_fno-stack-protector) -fno-common $(VBOX_GCC_fvisibility-hidden) -std=gnu99 $(VBOX_GCC_IPRT_FMT_CHECK)
++TEMPLATE_VBoxR0_CXXFLAGS = -fno-pie -nostdinc -g $(VBOX_GCC_pipe) $(VBOX_GCC_WERR) $(VBOX_GCC_PEDANTIC_CXX) \
++ $(VBOX_GCC_Wno-variadic-macros) $(VBOX_GCC_R0_OPT) $(VBOX_GCC_R0_FP) -fno-strict-aliasing -fno-exceptions \
++ $(VBOX_GCC_fno-stack-protector) -fno-common $(VBOX_GCC_fvisibility-inlines-hidden) $(VBOX_GCC_fvisibility-hidden) \
++ -fno-rtti $(VBOX_GCC_std) $(VBOX_GCC_IPRT_FMT_CHECK)
+++TEMPLATE_VBoxR0_CFLAGS.amd64 = -m64 -mno-red-zone -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -fasynchronous-unwind-tables -ffreestanding
+++TEMPLATE_VBoxR0_CXXFLAGS.amd64 = -m64 -mno-red-zone -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -fasynchronous-unwind-tables
+ TEMPLATE_VBoxR0_CXXFLAGS.freebsd = -ffreestanding
+ if $(VBOX_GCC_VERSION_CC) < 30400
+ TEMPLATE_VBoxR0_DEFS += RT_WITHOUT_PRAGMA_ONCE
+ endif
+-ifeq ($(KBUILD_TARGET),solaris)
++ ifeq ($(KBUILD_TARGET),solaris)
+ TEMPLATE_VBoxR0_LDFLAGS = -r
+ TEMPLATE_VBoxR0_LDFLAGS.solaris = -u _init -u _info
+ TEMPLATE_VBoxR0_LIBS.solaris = \
+@@ -4481,19 +4486,32 @@ ifeq ($(KBUILD_TARGET),solaris)
+ endif
+ # Solaris driver signing.
+ TEMPLATE_VBoxR0_POST_CMDS = $(VBOX_SIGN_DRIVER_CMDS)
+-else
++ else
+ TEMPLATE_VBoxR0_LDFLAGS = -nostdlib -Bsymbolic -g
+ ## @todo WTF doesn't the globals work? Debug info is supposed to be split everywhere. GRR
+ TEMPLATE_VBoxR0_LD_DEBUG = split
+-endif
+-ifn1of ($(KBUILD_TARGET),solaris freebsd)
++ endif
++ if1of ($(KBUILD_TARGET), linux)
++VBOX_WITH_VBOXR0_AS_DLL = 1
++TEMPLATE_VBoxR0_DLLSUFF = .r0
++TEMPLATE_VBoxR0_CFLAGS += -fPIC
++TEMPLATE_VBoxR0_CXXFLAGS += -fPIC
++TEMPLATE_VBoxR0_LDFLAGS +=
++TEMPLATE_VBoxR0_DTRACE_HDR_FLAGS += --pic
++TEMPLATE_VBoxR0_DTRACE_OBJ_FLAGS += --pic
++ else
++TEMPLATE_VBoxR0_CFLAGS.amd64 += -mcmodel=kernel
++TEMPLATE_VBoxR0_CXXFLAGS.amd64 += -mcmodel=kernel
++ endif
++ ifn1of ($(KBUILD_TARGET),solaris freebsd)
+ TEMPLATE_VBoxR0_LIBS = \
+ $(VBOX_GCC_LIBGCC) # intrinsics
+-endif
+-if1of ($(KBUILD_TARGET),linux)
+- TEMPLATE_VBoxR0_POST_CMDS = $(if $(eq $(tool_do),LINK_SYSMOD),if readelf -S $(out)|grep -q "[cd]tors"; then echo "Found ctors/dtors in $(out)!"; exit 1; fi)
+-endif
+-endif
++ endif
++ if1of ($(KBUILD_TARGET),linux)
++ TEMPLATE_VBoxR0_POST_CMDS += $(NLTAB)\
++ $(if $(eq $(tool_do),LINK_SYSMOD),if readelf -S $(out)|grep -q "[cd]tors"; then echo "Found ctors/dtors in $(out)!"; exit 1; fi)
++ endif
++endif # elf
+
+ ifeq ($(VBOX_LDR_FMT),macho)
+ TEMPLATE_VBoxR0_TOOL = $(VBOX_GCC_TOOL)
+--- a/tools/bin/gen-slickedit-workspace.sh
++++ b/tools/bin/gen-slickedit-workspace.sh
+@@ -496,11 +496,13 @@ my_generate_usercpp_h()
+ #
+ # Probe the slickedit user config, picking the most recent version.
+ #
++ MY_VSLICK_DB_OLD=
+ if test -z "${MY_SLICK_CONFIG}"; then
+ if test -d "${HOME}/Library/Application Support/SlickEdit"; then
+ MY_SLICKDIR_="${HOME}/Library/Application Support/SlickEdit"
+ MY_USERCPP_H="unxcpp.h"
+ MY_VSLICK_DB="vslick.sta" # was .stu earlier, 24 is using .sta.
++ MY_VSLICK_DB_OLD="vslick.stu"
+ elif test -d "${HOMEDRIVE}${HOMEPATH}/Documents/My SlickEdit Config"; then
+ MY_SLICKDIR_="${HOMEDRIVE}${HOMEPATH}/Documents/My SlickEdit Config"
+ MY_USERCPP_H="usercpp.h"
+@@ -508,7 +510,8 @@ my_generate_usercpp_h()
+ else
+ MY_SLICKDIR_="${HOME}/.slickedit"
+ MY_USERCPP_H="unxcpp.h"
+- MY_VSLICK_DB="vslick.stu"
++ MY_VSLICK_DB="vslick.sta"
++ MY_VSLICK_DB_OLD="vslick.stu"
+ fi
+ else
+ MY_SLICKDIR_="${MY_SLICK_CONFIG}"
+@@ -517,7 +520,8 @@ my_generate_usercpp_h()
+ MY_VSLICK_DB="vslick.sta"
+ else
+ MY_USERCPP_H="unxcpp.h"
+- MY_VSLICK_DB="vslick.stu"
++ MY_VSLICK_DB="vslick.sta"
++ MY_VSLICK_DB_OLD="vslick.stu"
+ fi
+ # MacOS: Implement me!
+ fi
+@@ -526,7 +530,9 @@ my_generate_usercpp_h()
+ MY_VER="0.0.0"
+ for subdir in "${MY_SLICKDIR_}/"*;
+ do
+- if test -f "${subdir}/${MY_USERCPP_H}" -o -f "${subdir}/${MY_VSLICK_DB}"; then
++ if test -f "${subdir}/${MY_USERCPP_H}" \
++ -o -f "${subdir}/${MY_VSLICK_DB}" \
++ -o '(' -n "${MY_VSLICK_DB_OLD}" -a -f "${subdir}/${MY_VSLICK_DB_OLD}" ')'; then
+ MY_CUR_VER_NUM=0
+ MY_CUR_VER=`echo "${subdir}" | ${MY_SED} -e 's,^.*/,,g'`
+
+@@ -561,6 +567,7 @@ my_generate_usercpp_h()
+ echo "Found SlickEdit v${MY_VER} preprocessor file: ${MY_USERCPP_H_FULL}"
+ else
+ echo "Failed to locate SlickEdit preprocessor file. You need to manually merge ${MY_USERCPP_H}."
++ echo "dbg: MY_SLICKDIR=${MY_SLICKDIR} MY_USERCPP_H_FULL=${MY_USERCPP_H_FULL}"
+ MY_USERCPP_H_FULL=""
+ fi
+
+@@ -717,6 +724,10 @@ EOF
+ #define RTASN1TYPE_STANDARD_PROTOTYPES_NO_GET_CORE(a_TypeNm, a_DeclMacro, a_ImplExtNm) int a_ImplExtNm##_Init(P##a_TypeNm pThis, PCRTASN1ALLOCATORVTABLE pAllocator); int a_ImplExtNm##_Clone(P##a_TypeNm pThis, PC##a_TypeNm) pSrc, PCRTASN1ALLOCATORVTABLE pAllocator); void a_ImplExtNm##_Delete(P##a_TypeNm pThis); int a_ImplExtNm##_Enum(P##a_TypeNm pThis, PFNRTASN1ENUMCALLBACK pfnCallback, uint32_t uDepth, void *pvUser); int a_ImplExtNm##_Compare(PC##a_TypeNm) pLeft, PC##a_TypeNm pRight); int a_ImplExtNm##_DecodeAsn1(PRTASN1CURSOR pCursor, uint32_t fFlags, P##a_TypeNm pThis, const char *pszErrorTag); int a_ImplExtNm##_CheckSanity(PC##a_TypeNm pThis, uint32_t fFlags, PRTERRINFO pErrInfo, const char *pszErrorTag)
+ #define RTASN1TYPE_STANDARD_PROTOTYPES(a_TypeNm, a_DeclMacro, a_ImplExtNm, a_Asn1CoreNm) inline PRTASN1CORE a_ImplExtNm##_GetAsn1Core(PC##a_TypeNm pThis) { return (PRTASN1CORE)&pThis->a_Asn1CoreNm; } inline bool a_ImplExtNm##_IsPresent(PC##a_TypeNm pThis) { return pThis && RTASN1CORE_IS_PRESENT(&pThis->a_Asn1CoreNm); } RTASN1TYPE_STANDARD_PROTOTYPES_NO_GET_CORE(a_TypeNm, a_DeclMacro, a_ImplExtNm)
+
++#define RTLDRELF_NAME(name) rtldrELF64##name
++#define RTLDRELF_SUFF(name) name##64
++#define RTLDRELF_MID(pre,suff) pre##64##suff
++
+ #define BS3_DECL(type) type
+ #define BS3_DECL_CALLBACK(type) type
+ #define TMPL_NM(name) name##_mmm
+--- a/include/iprt/asmdefs.mac
++++ b/include/iprt/asmdefs.mac
+@@ -841,18 +841,18 @@ size NAME(%1 %+ _EndProc) 0
+ ; is defined and RT_WITHOUT_NOCRT_WRAPPERS isn't.
+ ;
+ %macro RT_NOCRT_BEGINPROC 1
+-%ifdef RT_WITH_NOCRT_ALIASES
+-BEGINPROC RT_NOCRT(%1)
+-%ifdef ASM_FORMAT_ELF
++ %ifdef RT_WITH_NOCRT_ALIASES
++BEGINPROC_EXPORTED RT_NOCRT(%1)
++ %ifdef ASM_FORMAT_ELF
+ global NAME(%1)
+ weak NAME(%1)
+ NAME(%1):
+-%else
++ %else
+ GLOBALNAME %1
+-%endif
+-%else ; !RT_WITH_NOCRT_ALIASES
+-BEGINPROC RT_NOCRT(%1)
+-%endif ; !RT_WITH_NOCRT_ALIASES
++ %endif
++ %else ; !RT_WITH_NOCRT_ALIASES
++BEGINPROC_EXPORTED RT_NOCRT(%1)
++ %endif ; !RT_WITH_NOCRT_ALIASES
+ %endmacro ; RT_NOCRT_BEGINPROC
+
+ %ifdef RT_WITH_NOCRT_ALIASES
+--- a/src/VBox/Runtime/testcase/tstLdr-4.cpp
++++ b/src/VBox/Runtime/testcase/tstLdr-4.cpp
+@@ -35,9 +35,9 @@
+ #include <iprt/assert.h>
+ #include <iprt/param.h>
+ #include <iprt/path.h>
+-#include <iprt/initterm.h>
+ #include <iprt/err.h>
+ #include <iprt/string.h>
++#include <iprt/test.h>
+
+ #include <VBox/sup.h>
+
+@@ -45,8 +45,9 @@
+ /*********************************************************************************************************************************
+ * Global Variables *
+ *********************************************************************************************************************************/
+-static SUPGLOBALINFOPAGE g_MyGip = { SUPGLOBALINFOPAGE_MAGIC, SUPGLOBALINFOPAGE_VERSION, SUPGIPMODE_INVARIANT_TSC, 42 };
+-static PSUPGLOBALINFOPAGE g_pMyGip = &g_MyGip;
++static RTTEST g_hTest;
++static SUPGLOBALINFOPAGE g_MyGip = { SUPGLOBALINFOPAGE_MAGIC, SUPGLOBALINFOPAGE_VERSION, SUPGIPMODE_INVARIANT_TSC, 42 };
++static PSUPGLOBALINFOPAGE g_pMyGip = &g_MyGip;
+
+ extern "C" DECLEXPORT(int) DisasmTest1(void);
+
+@@ -58,6 +59,60 @@ static DECLCALLBACK(int) testEnumSegment
+ " link=%RTptr LB %RTptr align=%RTptr fProt=%#x offFile=%RTfoff\n"
+ , *piSeg, pSeg->RVA, pSeg->cbMapped, pSeg->pszName,
+ pSeg->LinkAddress, pSeg->cb, pSeg->Alignment, pSeg->fProt, pSeg->offFile);
++
++ if (pSeg->RVA != NIL_RTLDRADDR)
++ {
++ RTTESTI_CHECK(pSeg->cbMapped != NIL_RTLDRADDR);
++ RTTESTI_CHECK(pSeg->cbMapped >= pSeg->cb);
++ }
++ else
++ {
++ RTTESTI_CHECK(pSeg->cbMapped == NIL_RTLDRADDR);
++ }
++
++ /*
++ * Do some address conversion tests:
++ */
++ if (pSeg->cbMapped != NIL_RTLDRADDR)
++ {
++ /* RTLdrRvaToSegOffset: */
++ uint32_t iSegConv = ~(uint32_t)42;
++ RTLDRADDR offSegConv = ~(RTLDRADDR)22;
++ int rc = RTLdrRvaToSegOffset(hLdrMod, pSeg->RVA, &iSegConv, &offSegConv);
++ if (RT_FAILURE(rc))
++ RTTestIFailed("RTLdrRvaToSegOffset failed on Seg #%u / RVA %#RTptr: %Rrc", *piSeg, pSeg->RVA, rc);
++ else if (iSegConv != *piSeg || offSegConv != 0)
++ RTTestIFailed("RTLdrRvaToSegOffset on Seg #%u / RVA %#RTptr returned: iSegConv=%#x offSegConv=%RTptr, expected %#x and 0",
++ *piSeg, pSeg->RVA, iSegConv, offSegConv, *piSeg);
++
++ /* RTLdrSegOffsetToRva: */
++ RTLDRADDR uRvaConv = ~(RTLDRADDR)22;
++ rc = RTLdrSegOffsetToRva(hLdrMod, *piSeg, 0, &uRvaConv);
++ if (RT_FAILURE(rc))
++ RTTestIFailed("RTLdrSegOffsetToRva failed on Seg #%u / off 0: %Rrc", *piSeg, rc);
++ else if (uRvaConv != pSeg->RVA)
++ RTTestIFailed("RTLdrSegOffsetToRva on Seg #%u / off 0 returned: %RTptr, expected %RTptr", *piSeg, uRvaConv, pSeg->RVA);
++
++ /* RTLdrLinkAddressToRva: */
++ uRvaConv = ~(RTLDRADDR)22;
++ rc = RTLdrLinkAddressToRva(hLdrMod, pSeg->LinkAddress, &uRvaConv);
++ if (RT_FAILURE(rc))
++ RTTestIFailed("RTLdrLinkAddressToRva failed on Seg #%u / %RTptr: %Rrc", *piSeg, pSeg->LinkAddress, rc);
++ else if (uRvaConv != pSeg->RVA)
++ RTTestIFailed("RTLdrLinkAddressToRva on Seg #%u / %RTptr returned: %RTptr, expected %RTptr",
++ *piSeg, pSeg->LinkAddress, uRvaConv, pSeg->RVA);
++
++ /* RTLdrLinkAddressToSegOffset: */
++ iSegConv = ~(uint32_t)42;
++ offSegConv = ~(RTLDRADDR)22;
++ rc = RTLdrLinkAddressToSegOffset(hLdrMod, pSeg->LinkAddress, &iSegConv, &offSegConv);
++ if (RT_FAILURE(rc))
++ RTTestIFailed("RTLdrLinkAddressToSegOffset failed on Seg #%u / %#RTptr: %Rrc", *piSeg, pSeg->LinkAddress, rc);
++ else if (iSegConv != *piSeg || offSegConv != 0)
++ RTTestIFailed("RTLdrLinkAddressToSegOffset on Seg #%u / %#RTptr returned: iSegConv=%#x offSegConv=%RTptr, expected %#x and 0",
++ *piSeg, pSeg->LinkAddress, iSegConv, offSegConv, *piSeg);
++ }
++
+ *piSeg += 1;
+ RT_NOREF(hLdrMod);
+ return VINF_SUCCESS;
+@@ -125,12 +180,12 @@ static DECLCALLBACK(int) testGetImport(R
+ * regions the for compare usage. The third is loaded into one
+ * and then relocated between the two and other locations a few times.
+ *
+- * @returns number of errors.
+ * @param pszFilename The file to load the mess with.
+ */
+-static int testLdrOne(const char *pszFilename)
++static void testLdrOne(const char *pszFilename)
+ {
+- int cErrors = 0;
++ RTTestSub(g_hTest, RTPathFilename(pszFilename));
++
+ size_t cbImage = 0;
+ struct Load
+ {
+@@ -155,9 +210,8 @@ static int testLdrOne(const char *pszFil
+ rc = RTLdrOpen(pszFilename, 0, RTLDRARCH_WHATEVER, &aLoads[i].hLdrMod);
+ if (RT_FAILURE(rc))
+ {
+- RTPrintf("tstLdr-4: Failed to open '%s'/%d, rc=%Rrc. aborting test.\n", pszFilename, i, rc);
++ RTTestIFailed("tstLdr-4: Failed to open '%s'/%d, rc=%Rrc. aborting test.", pszFilename, i, rc);
+ Assert(aLoads[i].hLdrMod == NIL_RTLDRMOD);
+- cErrors++;
+ break;
+ }
+
+@@ -165,8 +219,7 @@ static int testLdrOne(const char *pszFil
+ size_t cb = RTLdrSize(aLoads[i].hLdrMod);
+ if (cbImage && cb != cbImage)
+ {
+- RTPrintf("tstLdr-4: Size mismatch '%s'/%d. aborting test.\n", pszFilename, i);
+- cErrors++;
++ RTTestIFailed("tstLdr-4: Size mismatch '%s'/%d. aborting test.", pszFilename, i);
+ break;
+ }
+ aLoads[i].cbBits = cbImage = cb;
+@@ -175,8 +228,7 @@ static int testLdrOne(const char *pszFil
+ aLoads[i].pvBits = RTMemExecAlloc(cb);
+ if (!aLoads[i].pvBits)
+ {
+- RTPrintf("tstLdr-4: Out of memory '%s'/%d cbImage=%d. aborting test.\n", pszFilename, i, cbImage);
+- cErrors++;
++ RTTestIFailed("Out of memory '%s'/%d cbImage=%d. aborting test.", pszFilename, i, cbImage);
+ break;
+ }
+
+@@ -184,8 +236,7 @@ static int testLdrOne(const char *pszFil
+ rc = RTLdrGetBits(aLoads[i].hLdrMod, aLoads[i].pvBits, (uintptr_t)aLoads[i].pvBits, testGetImport, NULL);
+ if (RT_FAILURE(rc))
+ {
+- RTPrintf("tstLdr-4: Failed to get bits for '%s'/%d, rc=%Rrc. aborting test\n", pszFilename, i, rc);
+- cErrors++;
++ RTTestIFailed("Failed to get bits for '%s'/%d, rc=%Rrc. aborting test", pszFilename, i, rc);
+ break;
+ }
+ }
+@@ -193,7 +244,7 @@ static int testLdrOne(const char *pszFil
+ /*
+ * Execute the code.
+ */
+- if (!cErrors)
++ if (!RTTestSubErrorCount(g_hTest))
+ {
+ for (i = 0; i < RT_ELEMENTS(aLoads); i += 1)
+ {
+@@ -209,22 +260,18 @@ static int testLdrOne(const char *pszFil
+ UINT32_MAX, "_DisasmTest1", &Value);
+ if (RT_FAILURE(rc))
+ {
+- RTPrintf("tstLdr-4: Failed to get symbol \"DisasmTest1\" from load #%d: %Rrc\n", i, rc);
+- cErrors++;
++ RTTestIFailed("Failed to get symbol \"DisasmTest1\" from load #%d: %Rrc", i, rc);
+ break;
+ }
+ DECLCALLBACKPTR(int, pfnDisasmTest1)(void) = (DECLCALLBACKPTR(int, RT_NOTHING)(void))(uintptr_t)Value; /* eeeh. */
+- RTPrintf("tstLdr-4: pfnDisasmTest1=%p / add-symbol-file %s %#x\n", pfnDisasmTest1, pszFilename, aLoads[i].pvBits);
++ RTPrintf("tstLdr-4: pfnDisasmTest1=%p / add-symbol-file %s %#p\n", pfnDisasmTest1, pszFilename, aLoads[i].pvBits);
+ uint32_t iSeg = 0;
+ RTLdrEnumSegments(aLoads[i].hLdrMod, testEnumSegment, &iSeg);
+
+ /* call the test function. */
+ rc = pfnDisasmTest1();
+ if (rc)
+- {
+- RTPrintf("tstLdr-4: load #%d Test1 -> %#x\n", i, rc);
+- cErrors++;
+- }
++ RTTestIFailed("load #%d Test1 -> %#x", i, rc);
+
+ /* While we're here, check a couple of RTLdrQueryProp calls too */
+ void *pvBits = aLoads[i].pvBits;
+@@ -255,56 +302,42 @@ static int testLdrOne(const char *pszFil
+ {
+ rc = RTLdrClose(aLoads[i].hLdrMod);
+ if (RT_FAILURE(rc))
+- {
+- RTPrintf("tstLdr-4: Failed to close '%s' i=%d, rc=%Rrc.\n", pszFilename, i, rc);
+- cErrors++;
+- }
++ RTTestIFailed("Failed to close '%s' i=%d, rc=%Rrc.", pszFilename, i, rc);
+ }
+ }
+
+- return cErrors;
+ }
+
+
+
+-int main(int argc, char **argv)
++int main()
+ {
+- int cErrors = 0;
+- RTR3InitExe(argc, &argv, 0);
++ RTEXITCODE rcExit = RTTestInitAndCreate("tstLdr-4", &g_hTest);
++ if (rcExit != RTEXITCODE_SUCCESS)
++ return rcExit;
+
+ /*
+ * Sanity check.
+ */
+ int rc = DisasmTest1();
+- if (rc)
++ if (rc == 0)
+ {
+- RTPrintf("tstLdr-4: FATAL ERROR - DisasmTest1 is buggy: rc=%#x\n", rc);
+- return 1;
+- }
++ /*
++ * Execute the test.
++ */
++ char szPath[RTPATH_MAX];
++ rc = RTPathExecDir(szPath, sizeof(szPath) - sizeof("/tstLdrObjR0.r0"));
++ if (RT_SUCCESS(rc))
++ {
++ strcat(szPath, "/tstLdrObjR0.r0");
+
+- /*
+- * Execute the test.
+- */
+- char szPath[RTPATH_MAX];
+- rc = RTPathExecDir(szPath, sizeof(szPath) - sizeof("/tstLdrObjR0.r0"));
+- if (RT_SUCCESS(rc))
+- {
+- strcat(szPath, "/tstLdrObjR0.r0");
+- RTPrintf("tstLdr-4: TESTING '%s'...\n", szPath);
+- cErrors += testLdrOne(szPath);
++ testLdrOne(szPath);
++ }
++ else
++ RTTestIFailed("RTPathExecDir -> %Rrc", rc);
+ }
+ else
+- {
+- RTPrintf("tstLdr-4: RTPathExecDir -> %Rrc\n", rc);
+- cErrors++;
+- }
++ RTTestIFailed("FATAL ERROR - DisasmTest1 is buggy: rc=%#x", rc);
+
+- /*
+- * Test result summary.
+- */
+- if (!cErrors)
+- RTPrintf("tstLdr-4: SUCCESS\n");
+- else
+- RTPrintf("tstLdr-4: FAILURE - %d errors\n", cErrors);
+- return !!cErrors;
++ return RTTestSummaryAndDestroy(g_hTest);
+ }
+--- a/include/iprt/formats/elf-common.h
++++ b/include/iprt/formats/elf-common.h
+@@ -198,6 +198,12 @@ typedef struct {
+ #define PT_LOPROC 0x70000000 /* First processor-specific type. */
+ #define PT_HIPROC 0x7fffffff /* Last processor-specific type. */
+
++#define PT_GNU_EH_FRAME 0x6474e550 /**< GNU/Linux -> .eh_frame_hdr */
++#define PT_GNU_STACK 0x6474e551 /**< GNU/Linux -> stack prot (RWX or RW) */
++#define PT_GNU_RELRO 0x6474e552 /**< GNU/Linux -> make RO after relocations */
++#define PT_GNU_PROPERTY 0x6474e553 /**< GNU/Linux -> .note.gnu.property */
++
++
+ /* Values for p_flags. */
+ #define PF_X 0x1 /* Executable. */
+ #define PF_W 0x2 /* Writable. */
+--- a/src/VBox/Runtime/common/ldr/ldrELF.cpp
++++ b/src/VBox/Runtime/common/ldr/ldrELF.cpp
+@@ -51,9 +51,11 @@
+ * Defined Constants And Macros *
+ *********************************************************************************************************************************/
+ /** Finds an ELF symbol table string. */
+-#define ELF_STR(pHdrs, iStr) ((pHdrs)->pStr + (iStr))
++#define ELF_STR(pHdrs, iStr) ((pHdrs)->Rel.pStr + (iStr))
++/** Finds an ELF symbol table string. */
++#define ELF_DYN_STR(pHdrs, iStr) ((pHdrs)->Dyn.pStr + (iStr))
+ /** Finds an ELF section header string. */
+-#define ELF_SH_STR(pHdrs, iStr) ((pHdrs)->pShStr + (iStr))
++#define ELF_SH_STR(pHdrs, iStr) ((pHdrs)->pShStr + (iStr))
+
+
+
+@@ -62,6 +64,7 @@
+ *********************************************************************************************************************************/
+ #ifdef LOG_ENABLED
+ static const char *rtldrElfGetShdrType(uint32_t iType);
++static const char *rtldrElfGetPhdrType(uint32_t iType);
+ #endif
+
+
+@@ -81,6 +84,7 @@ static const char *rtldrElfGetShdrType(u
+
+
+ #ifdef LOG_ENABLED
++
+ /**
+ * Gets the section type.
+ *
+@@ -91,23 +95,51 @@ static const char *rtldrElfGetShdrType(u
+ {
+ switch (iType)
+ {
+- case SHT_NULL: return "SHT_NULL";
+- case SHT_PROGBITS: return "SHT_PROGBITS";
+- case SHT_SYMTAB: return "SHT_SYMTAB";
+- case SHT_STRTAB: return "SHT_STRTAB";
+- case SHT_RELA: return "SHT_RELA";
+- case SHT_HASH: return "SHT_HASH";
+- case SHT_DYNAMIC: return "SHT_DYNAMIC";
+- case SHT_NOTE: return "SHT_NOTE";
+- case SHT_NOBITS: return "SHT_NOBITS";
+- case SHT_REL: return "SHT_REL";
+- case SHT_SHLIB: return "SHT_SHLIB";
+- case SHT_DYNSYM: return "SHT_DYNSYM";
++ RT_CASE_RET_STR(SHT_NULL);
++ RT_CASE_RET_STR(SHT_PROGBITS);
++ RT_CASE_RET_STR(SHT_SYMTAB);
++ RT_CASE_RET_STR(SHT_STRTAB);
++ RT_CASE_RET_STR(SHT_RELA);
++ RT_CASE_RET_STR(SHT_HASH);
++ RT_CASE_RET_STR(SHT_DYNAMIC);
++ RT_CASE_RET_STR(SHT_NOTE);
++ RT_CASE_RET_STR(SHT_NOBITS);
++ RT_CASE_RET_STR(SHT_REL);
++ RT_CASE_RET_STR(SHT_SHLIB);
++ RT_CASE_RET_STR(SHT_DYNSYM);
+ default:
+ return "";
+ }
+ }
+-#endif
++
++/**
++ * Gets the program header type.
++ *
++ * @returns Pointer to read only string.
++ * @param iType The section type index.
++ */
++static const char *rtldrElfGetPhdrType(uint32_t iType)
++{
++ switch (iType)
++ {
++ RT_CASE_RET_STR(PT_NULL);
++ RT_CASE_RET_STR(PT_LOAD);
++ RT_CASE_RET_STR(PT_DYNAMIC);
++ RT_CASE_RET_STR(PT_INTERP);
++ RT_CASE_RET_STR(PT_NOTE);
++ RT_CASE_RET_STR(PT_SHLIB);
++ RT_CASE_RET_STR(PT_PHDR);
++ RT_CASE_RET_STR(PT_TLS);
++ RT_CASE_RET_STR(PT_GNU_EH_FRAME);
++ RT_CASE_RET_STR(PT_GNU_STACK);
++ RT_CASE_RET_STR(PT_GNU_RELRO);
++ RT_CASE_RET_STR(PT_GNU_PROPERTY);
++ default:
++ return "";
++ }
++}
++
++#endif /* LOG_ENABLED*/
+
+
+ /**
+@@ -124,8 +156,6 @@ DECLHIDDEN(int) rtldrELFOpen(PRTLDRREADE
+ {
+ const char *pszLogName = pReader->pfnLogName(pReader); NOREF(pszLogName);
+
+- RT_NOREF_PV(pErrInfo); /** @todo implement */
+-
+ /*
+ * Read the ident to decide if this is 32-bit or 64-bit
+ * and worth dealing with.
+@@ -134,6 +164,7 @@ DECLHIDDEN(int) rtldrELFOpen(PRTLDRREADE
+ int rc = pReader->pfnRead(pReader, &e_ident, sizeof(e_ident), 0);
+ if (RT_FAILURE(rc))
+ return rc;
++
+ if ( e_ident[EI_MAG0] != ELFMAG0
+ || e_ident[EI_MAG1] != ELFMAG1
+ || e_ident[EI_MAG2] != ELFMAG2
+@@ -141,19 +172,17 @@ DECLHIDDEN(int) rtldrELFOpen(PRTLDRREADE
+ || ( e_ident[EI_CLASS] != ELFCLASS32
+ && e_ident[EI_CLASS] != ELFCLASS64)
+ )
+- {
+- Log(("RTLdrELF: %s: Unsupported/invalid ident %.*Rhxs\n", pszLogName, sizeof(e_ident), e_ident));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Unsupported/invalid ident %.*Rhxs", pszLogName, sizeof(e_ident), e_ident);
++
+ if (e_ident[EI_DATA] != ELFDATA2LSB)
+- {
+- Log(("RTLdrELF: %s: ELF endian %x is unsupported\n", pszLogName, e_ident[EI_DATA]));
+- return VERR_LDRELF_ODD_ENDIAN;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRELF_ODD_ENDIAN,
++ "%s: ELF endian %x is unsupported", pszLogName, e_ident[EI_DATA]);
++
+ if (e_ident[EI_CLASS] == ELFCLASS32)
+- rc = rtldrELF32Open(pReader, fFlags, enmArch, phLdrMod);
++ rc = rtldrELF32Open(pReader, fFlags, enmArch, phLdrMod, pErrInfo);
+ else
+- rc = rtldrELF64Open(pReader, fFlags, enmArch, phLdrMod);
++ rc = rtldrELF64Open(pReader, fFlags, enmArch, phLdrMod, pErrInfo);
+ return rc;
+ }
+
+--- a/src/VBox/Runtime/common/ldr/ldrELFRelocatable.cpp.h
++++ b/src/VBox/Runtime/common/ldr/ldrELFRelocatable.cpp.h
+@@ -29,31 +29,37 @@
+ * Defined Constants And Macros *
+ *******************************************************************************/
+ #if ELF_MODE == 32
+-#define RTLDRELF_NAME(name) rtldrELF32##name
+-#define RTLDRELF_SUFF(name) name##32
+-#define RTLDRELF_MID(pre,suff) pre##32##suff
+-#define FMT_ELF_ADDR "%08RX32"
+-#define FMT_ELF_HALF "%04RX16"
+-#define FMT_ELF_OFF "%08RX32"
+-#define FMT_ELF_SIZE "%08RX32"
+-#define FMT_ELF_SWORD "%RI32"
+-#define FMT_ELF_WORD "%08RX32"
+-#define FMT_ELF_XWORD "%08RX32"
+-#define FMT_ELF_SXWORD "%RI32"
++# define RTLDRELF_NAME(name) rtldrELF32##name
++# define RTLDRELF_SUFF(name) name##32
++# define RTLDRELF_MID(pre,suff) pre##32##suff
++# define FMT_ELF_ADDR "%08RX32"
++# define FMT_ELF_ADDR7 "%07RX32"
++# define FMT_ELF_HALF "%04RX16"
++# define FMT_ELF_OFF "%08RX32"
++# define FMT_ELF_SIZE "%08RX32"
++# define FMT_ELF_SWORD "%RI32"
++# define FMT_ELF_WORD "%08RX32"
++# define FMT_ELF_XWORD "%08RX32"
++# define FMT_ELF_SXWORD "%RI32"
++# define Elf_Xword Elf32_Word
++# define Elf_Sxword Elf32_Sword
+
+ #elif ELF_MODE == 64
+-#define RTLDRELF_NAME(name) rtldrELF64##name
+-#define RTLDRELF_SUFF(name) name##64
+-#define RTLDRELF_MID(pre,suff) pre##64##suff
+-#define FMT_ELF_ADDR "%016RX64"
+-#define FMT_ELF_HALF "%04RX16"
+-#define FMT_ELF_SHALF "%RI16"
+-#define FMT_ELF_OFF "%016RX64"
+-#define FMT_ELF_SIZE "%016RX64"
+-#define FMT_ELF_SWORD "%RI32"
+-#define FMT_ELF_WORD "%08RX32"
+-#define FMT_ELF_XWORD "%016RX64"
+-#define FMT_ELF_SXWORD "%RI64"
++# define RTLDRELF_NAME(name) rtldrELF64##name
++# define RTLDRELF_SUFF(name) name##64
++# define RTLDRELF_MID(pre,suff) pre##64##suff
++# define FMT_ELF_ADDR "%016RX64"
++# define FMT_ELF_ADDR7 "%08RX64"
++# define FMT_ELF_HALF "%04RX16"
++# define FMT_ELF_SHALF "%RI16"
++# define FMT_ELF_OFF "%016RX64"
++# define FMT_ELF_SIZE "%016RX64"
++# define FMT_ELF_SWORD "%RI32"
++# define FMT_ELF_WORD "%08RX32"
++# define FMT_ELF_XWORD "%016RX64"
++# define FMT_ELF_SXWORD "%RI64"
++# define Elf_Xword Elf64_Xword
++# define Elf_Sxword Elf64_Sxword
+ #endif
+
+ #define Elf_Ehdr RTLDRELF_MID(Elf,_Ehdr)
+@@ -74,6 +80,9 @@
+ #define RTLDRMODELF RTLDRELF_MID(RTLDRMODELF,RT_NOTHING)
+ #define PRTLDRMODELF RTLDRELF_MID(PRTLDRMODELF,RT_NOTHING)
+
++#define RTLDRMODELFSHX RTLDRELF_MID(RTLDRMODELFSHX,RT_NOTHING)
++#define PRTLDRMODELFSHX RTLDRELF_MID(PRTLDRMODELFSHX,RT_NOTHING)
++
+ #define ELF_R_SYM(info) RTLDRELF_MID(ELF,_R_SYM)(info)
+ #define ELF_R_TYPE(info) RTLDRELF_MID(ELF,_R_TYPE)(info)
+ #define ELF_R_INFO(sym, type) RTLDRELF_MID(ELF,_R_INFO)(sym, type)
+@@ -86,6 +95,20 @@
+ * Structures and Typedefs *
+ *******************************************************************************/
+ /**
++ * Extra section info.
++ */
++typedef struct RTLDRMODELFSHX
++{
++ /** The corresponding program header. */
++ uint16_t idxPhdr;
++ /** The corresponding dynamic section entry (address). */
++ uint16_t idxDt;
++ /** The DT tag. */
++ uint32_t uDtTag;
++} RTLDRMODELFSHX;
++typedef RTLDRMODELFSHX *PRTLDRMODELFSHX;
++
++/**
+ * The ELF loader structure.
+ */
+ typedef struct RTLDRMODELF
+@@ -105,36 +128,82 @@ typedef struct RTLDRMODELF
+ /** Unmodified section headers (allocated after paShdrs, so no need to free).
+ * Not valid if the image is DONE. */
+ Elf_Shdr const *paOrgShdrs;
++ /** Runs parallel to paShdrs and is part of the same allocation. */
++ PRTLDRMODELFSHX paShdrExtras;
++ /** Base section number, either 1 or zero depending on whether we've
++ * re-used the NULL entry for .elf.headers in ET_EXEC/ET_DYN. */
++ unsigned iFirstSect;
++ /** Set if the SHF_ALLOC section headers are in order of sh_addr. */
++ bool fShdrInOrder;
+ /** The size of the loaded image. */
+ size_t cbImage;
+
+ /** The image base address if it's an EXEC or DYN image. */
+ Elf_Addr LinkAddress;
+
+- /** The symbol section index. */
+- unsigned iSymSh;
+- /** Number of symbols in the table. */
+- unsigned cSyms;
+- /** Pointer to symbol table within RTLDRMODELF::pvBits. */
+- const Elf_Sym *paSyms;
+-
+- /** The string section index. */
+- unsigned iStrSh;
+- /** Size of the string table. */
+- unsigned cbStr;
+- /** Pointer to string table within RTLDRMODELF::pvBits. */
+- const char *pStr;
++ struct
++ {
++ /** The symbol section index. */
++ unsigned iSymSh;
++ /** Number of symbols in the table. */
++ unsigned cSyms;
++ /** Pointer to symbol table within RTLDRMODELF::pvBits. */
++ const Elf_Sym *paSyms;
++
++ /** The string section index. */
++ unsigned iStrSh;
++ /** Size of the string table. */
++ unsigned cbStr;
++ /** Pointer to string table within RTLDRMODELF::pvBits. */
++ const char *pStr;
++ } Rel /**< Regular symbols and strings. */
++ , Dyn /**< Dynamic symbols and strings. */;
+
+- /** Size of the section header string table. */
+- unsigned cbShStr;
+ /** Pointer to section header string table within RTLDRMODELF::pvBits. */
+ const char *pShStr;
++ /** Size of the section header string table. */
++ unsigned cbShStr;
+
+ /** The '.eh_frame' section index. Zero if not searched for, ~0U if not found. */
+ unsigned iShEhFrame;
+ /** The '.eh_frame_hdr' section index. Zero if not searched for, ~0U if not found. */
+ unsigned iShEhFrameHdr;
+-} RTLDRMODELF, *PRTLDRMODELF;
++
++ /** The '.dynamic' / SHT_DYNAMIC section index. ~0U if not present. */
++ unsigned iShDynamic;
++ /** Number of entries in paDynamic. */
++ unsigned cDynamic;
++ /** The dynamic section (NULL for ET_REL). */
++ Elf_Dyn *paDynamic;
++ /** Program headers (NULL for ET_REL). */
++ Elf_Phdr *paPhdrs;
++
++ /** Info extracted from PT_DYNAMIC and the program headers. */
++ struct
++ {
++ /** DT_RELA/DT_REL. */
++ Elf_Addr uPtrRelocs;
++ /** DT_RELASZ/DT_RELSZ. */
++ Elf_Xword cbRelocs;
++ /** Non-zero if we've seen DT_RELAENT/DT_RELENT. */
++ unsigned cbRelocEntry;
++ /** DT_RELA or DT_REL. */
++ unsigned uRelocType;
++ /** The index of the section header matching DT_RELA/DT_REL. */
++ unsigned idxShRelocs;
++
++ /** DT_JMPREL. */
++ Elf_Addr uPtrJmpRelocs;
++ /** DT_PLTRELSZ. */
++ Elf_Xword cbJmpRelocs;
++ /** DT_RELA or DT_REL (if we've seen DT_PLTREL). */
++ unsigned uJmpRelocType;
++ /** The index of the section header matching DT_JMPREL. */
++ unsigned idxShJmpRelocs;
++ } DynInfo;
++} RTLDRMODELF;
++/** Pointer to an ELF module instance. */
++typedef RTLDRMODELF *PRTLDRMODELF;
+
+
+ /**
+@@ -154,11 +223,15 @@ static int RTLDRELF_NAME(MapBits)(PRTLDR
+ if (RT_SUCCESS(rc))
+ {
+ const uint8_t *pu8 = (const uint8_t *)pModElf->pvBits;
+- if (pModElf->iSymSh != ~0U)
+- pModElf->paSyms = (const Elf_Sym *)(pu8 + pModElf->paShdrs[pModElf->iSymSh].sh_offset);
+- if (pModElf->iStrSh != ~0U)
+- pModElf->pStr = (const char *)(pu8 + pModElf->paShdrs[pModElf->iStrSh].sh_offset);
+- pModElf->pShStr = (const char *)(pu8 + pModElf->paShdrs[pModElf->Ehdr.e_shstrndx].sh_offset);
++ if (pModElf->Rel.iSymSh != ~0U)
++ pModElf->Rel.paSyms = (const Elf_Sym *)(pu8 + pModElf->paShdrs[pModElf->Rel.iSymSh].sh_offset);
++ if (pModElf->Rel.iStrSh != ~0U)
++ pModElf->Rel.pStr = (const char *)(pu8 + pModElf->paShdrs[pModElf->Rel.iStrSh].sh_offset);
++ if (pModElf->Dyn.iSymSh != ~0U)
++ pModElf->Dyn.paSyms = (const Elf_Sym *)(pu8 + pModElf->paShdrs[pModElf->Dyn.iSymSh].sh_offset);
++ if (pModElf->Dyn.iStrSh != ~0U)
++ pModElf->Dyn.pStr = (const char *)(pu8 + pModElf->paShdrs[pModElf->Dyn.iStrSh].sh_offset);
++ pModElf->pShStr = (const char *)(pu8 + pModElf->paShdrs[pModElf->Ehdr.e_shstrndx].sh_offset);
+
+ /*
+ * Verify that the ends of the string tables have a zero terminator
+@@ -167,8 +240,12 @@ static int RTLDRELF_NAME(MapBits)(PRTLDR
+ * sh_offset and sh_size were verfied in RTLDRELF_NAME(ValidateSectionHeader)() already so they
+ * are safe to use.
+ */
+- AssertMsgStmt( pModElf->iStrSh == ~0U
+- || pModElf->pStr[pModElf->paShdrs[pModElf->iStrSh].sh_size - 1] == '\0',
++ AssertMsgStmt( pModElf->Rel.iStrSh == ~0U
++ || pModElf->Rel.pStr[pModElf->paShdrs[pModElf->Rel.iStrSh].sh_size - 1] == '\0',
++ ("The string table is not zero terminated!\n"),
++ rc = VERR_LDRELF_UNTERMINATED_STRING_TAB);
++ AssertMsgStmt( pModElf->Dyn.iStrSh == ~0U
++ || pModElf->Dyn.pStr[pModElf->paShdrs[pModElf->Dyn.iStrSh].sh_size - 1] == '\0',
+ ("The string table is not zero terminated!\n"),
+ rc = VERR_LDRELF_UNTERMINATED_STRING_TAB);
+ AssertMsgStmt(pModElf->pShStr[pModElf->paShdrs[pModElf->Ehdr.e_shstrndx].sh_size - 1] == '\0',
+@@ -180,10 +257,12 @@ static int RTLDRELF_NAME(MapBits)(PRTLDR
+ /* Unmap. */
+ int rc2 = pModElf->Core.pReader->pfnUnmap(pModElf->Core.pReader, pModElf->pvBits);
+ AssertRC(rc2);
+- pModElf->pvBits = NULL;
+- pModElf->paSyms = NULL;
+- pModElf->pStr = NULL;
+- pModElf->pShStr = NULL;
++ pModElf->pvBits = NULL;
++ pModElf->Rel.paSyms = NULL;
++ pModElf->Rel.pStr = NULL;
++ pModElf->Dyn.paSyms = NULL;
++ pModElf->Dyn.pStr = NULL;
++ pModElf->pShStr = NULL;
+ }
+ }
+ return rc;
+@@ -200,6 +279,101 @@ static int RTLDRELF_NAME(MapBits)(PRTLDR
+ *
+ */
+
++/**
++ * Get the symbol and symbol value.
++ *
++ * @returns iprt status code.
++ * @param pModElf The ELF loader module instance data.
++ * @param BaseAddr The base address which the module is being fixedup to.
++ * @param pfnGetImport The callback function to use to resolve imports (aka unresolved externals).
++ * @param pvUser User argument to pass to the callback.
++ * @param iSym The symbol to get.
++ * @param ppSym Where to store the symbol pointer on success. (read only)
++ * @param pSymValue Where to store the symbol value on success.
++ */
++static int RTLDRELF_NAME(SymbolExecDyn)(PRTLDRMODELF pModElf, Elf_Addr BaseAddr, PFNRTLDRIMPORT pfnGetImport, void *pvUser,
++ Elf_Size iSym, const Elf_Sym **ppSym, Elf_Addr *pSymValue)
++{
++ /*
++ * Validate and find the symbol.
++ */
++ AssertMsgReturn(iSym < pModElf->Dyn.cSyms, ("iSym=%d is an invalid symbol index!\n", iSym), VERR_LDRELF_INVALID_SYMBOL_INDEX);
++ const Elf_Sym *pSym = &pModElf->Dyn.paSyms[iSym];
++ *ppSym = pSym;
++
++ AssertMsgReturn(pSym->st_name < pModElf->Dyn.cbStr,
++ ("iSym=%d st_name=%d str sh_size=%d\n", iSym, pSym->st_name, pModElf->Dyn.cbStr),
++ VERR_LDRELF_INVALID_SYMBOL_NAME_OFFSET);
++ const char * const pszName = pModElf->Dyn.pStr + pSym->st_name;
++
++ /*
++ * Determine the symbol value.
++ *
++     * Symbols need different treatment depending on which section they are in.
++     * Undefined and absolute symbols go into special non-existing sections.
++ */
++ switch (pSym->st_shndx)
++ {
++ /*
++ * Undefined symbol, needs resolving.
++ *
++ * Since ELF has no generic concept of importing from specific module (the OS/2 ELF format
++ * has but that's an OS extension and only applies to programs and dlls), we'll have to ask
++ * the resolver callback to do a global search.
++ */
++ case SHN_UNDEF:
++ {
++ /* Try to resolve the symbol. */
++ RTUINTPTR Value;
++ int rc = pfnGetImport(&pModElf->Core, "", pszName, ~0U, &Value, pvUser);
++ AssertMsgRCReturn(rc, ("Failed to resolve '%s' (iSym=" FMT_ELF_SIZE " rc=%Rrc\n", pszName, iSym, rc), rc);
++
++ *pSymValue = (Elf_Addr)Value;
++ AssertMsgReturn((RTUINTPTR)*pSymValue == Value,
++ ("Symbol value overflowed! '%s' (iSym=" FMT_ELF_SIZE "\n", pszName, iSym), VERR_SYMBOL_VALUE_TOO_BIG);
++
++ Log2(("rtldrELF: #%-3d - UNDEF " FMT_ELF_ADDR " '%s'\n", iSym, *pSymValue, pszName));
++ break;
++ }
++
++ /*
++         * Absolute symbols need no fixing since they are, well, absolute.
++ */
++ case SHN_ABS:
++ *pSymValue = pSym->st_value;
++ Log2(("rtldrELF: #%-3d - ABS " FMT_ELF_ADDR " '%s'\n", iSym, *pSymValue, pszName));
++ break;
++
++ /*
++         * All other symbols are addressed relative to the image base in DYN and EXEC binaries.
++ */
++ default:
++ AssertMsgReturn(pSym->st_shndx < pModElf->Ehdr.e_shnum,
++ ("iSym=%d st_shndx=%d e_shnum=%d pszName=%s\n", iSym, pSym->st_shndx, pModElf->Ehdr.e_shnum, pszName),
++ VERR_BAD_EXE_FORMAT);
++ *pSymValue = pSym->st_value + BaseAddr;
++ Log2(("rtldrELF: #%-3d - %5d " FMT_ELF_ADDR " '%s'\n", iSym, pSym->st_shndx, *pSymValue, pszName));
++ break;
++ }
++
++ return VINF_SUCCESS;
++}
++
++
++#if ELF_MODE == 32
++/** Helper for RelocateSectionExecDyn. */
++DECLINLINE(const Elf_Shdr *) RTLDRELF_NAME(RvaToSectionHeader)(PRTLDRMODELF pModElf, Elf_Addr uRva)
++{
++ const Elf_Shdr * const pShdrFirst = pModElf->paShdrs;
++ const Elf_Shdr *pShdr = pShdrFirst + pModElf->Ehdr.e_shnum;
++ while (--pShdr != pShdrFirst)
++ if (uRva - pShdr->sh_addr /*rva*/ < pShdr->sh_size)
++ return pShdr;
++ AssertFailed();
++ return pShdr;
++}
++#endif
++
+
+ /**
+ * Applies the fixups for a section in an executable image.
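
/*
 * A minimal standalone sketch of the symbol-value rule that SymbolExecDyn above applies for
 * ET_DYN/ET_EXEC images: undefined symbols are resolved through the import callback, absolute
 * symbols keep their value, and everything else becomes image relative.  The types, constants
 * and callback shape here are simplified stand-ins, not the IPRT Elf_Sym/PFNRTLDRIMPORT ones.
 */
typedef unsigned long SketchAddr;
typedef SketchAddr (*PFNSKETCHIMPORT)(const char *pszName);

static int sketchSymbolValue(unsigned uShndx, SketchAddr uStValue, SketchAddr uBaseAddr,
                             PFNSKETCHIMPORT pfnImport, const char *pszName, SketchAddr *puValue)
{
    if (uShndx == 0 /* SHN_UNDEF */)            /* Unresolved: ask the import resolver. */
        *puValue = pfnImport(pszName);
    else if (uShndx == 0xfff1 /* SHN_ABS */)    /* Absolute: the value is already final. */
        *puValue = uStValue;
    else                                        /* Regular section: relative to the load base. */
        *puValue = uStValue + uBaseAddr;
    return 0;
}
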
+@@ -230,84 +404,106 @@ static int RTLDRELF_NAME(RelocateSection
+ * Iterate the relocations.
+ * The relocations are stored in an array of Elf32_Rel records and covers the entire relocation section.
+ */
++#if ELF_MODE == 32
++ const Elf_Shdr *pShdr = pModElf->paShdrs;
+ const Elf_Addr offDelta = BaseAddr - pModElf->LinkAddress;
++#endif
+ const Elf_Reloc *paRels = (const Elf_Reloc *)pvRelocs;
+- const unsigned iRelMax = (unsigned)(cbRelocs / sizeof(paRels[0]));
++ const unsigned iRelMax = (unsigned)(cbRelocs / sizeof(paRels[0]));
+ AssertMsgReturn(iRelMax == cbRelocs / sizeof(paRels[0]), (FMT_ELF_SIZE "\n", cbRelocs / sizeof(paRels[0])),
+ VERR_IMAGE_TOO_BIG);
+ for (unsigned iRel = 0; iRel < iRelMax; iRel++)
+ {
+ /*
+- * Skip R_XXX_NONE entries early to avoid confusion in the symbol
+- * getter code.
++ * Apply fixups not taking a symbol (will 'continue' rather than 'break').
+ */
++ AssertMsgReturn(paRels[iRel].r_offset < cbSec, (FMT_ELF_ADDR " " FMT_ELF_SIZE "\n", paRels[iRel].r_offset, cbSec),
++ VERR_LDRELF_INVALID_RELOCATION_OFFSET);
+ #if ELF_MODE == 32
+- if (ELF_R_TYPE(paRels[iRel].r_info) == R_386_NONE)
+- continue;
+-#elif ELF_MODE == 64
+- if (ELF_R_TYPE(paRels[iRel].r_info) == R_X86_64_NONE)
+- continue;
++ if (paRels[iRel].r_offset - pShdr->sh_addr /*rva*/ >= pShdr->sh_size)
++ pShdr = RTLDRELF_NAME(RvaToSectionHeader)(pModElf, paRels[iRel].r_offset);
++ static const Elf_Addr s_uZero = 0;
++ const Elf_Addr *pAddrR = RT_LIKELY(pShdr->sh_type != SHT_NOBITS) /* Where to read the addend. */
++ ? (const Elf_Addr *)(pu8SecBaseR + paRels[iRel].r_offset - pShdr->sh_addr /*rva*/
++ + pShdr->sh_offset)
++ : &s_uZero;
+ #endif
+-
+- /*
+- * Validate and find the symbol, resolve undefined ones.
+- */
+- Elf_Size iSym = ELF_R_SYM(paRels[iRel].r_info);
+- if (iSym >= pModElf->cSyms)
+- {
+- AssertMsgFailed(("iSym=%d is an invalid symbol index!\n", iSym));
+- return VERR_LDRELF_INVALID_SYMBOL_INDEX;
+- }
+- const Elf_Sym *pSym = &pModElf->paSyms[iSym];
+- if (pSym->st_name >= pModElf->cbStr)
++ Elf_Addr *pAddrW = (Elf_Addr *)(pu8SecBaseW + paRels[iRel].r_offset); /* Where to write the fixup. */
++ switch (ELF_R_TYPE(paRels[iRel].r_info))
+ {
+- AssertMsgFailed(("iSym=%d st_name=%d str sh_size=%d\n", iSym, pSym->st_name, pModElf->cbStr));
+- return VERR_LDRELF_INVALID_SYMBOL_NAME_OFFSET;
+- }
++ /*
++ * Image relative (addend + base).
++ */
++#if ELF_MODE == 32
++ case R_386_RELATIVE:
++ {
++ const Elf_Addr Value = *pAddrR + BaseAddr;
++ *(uint32_t *)pAddrW = Value;
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_386_RELATIVE Value=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, Value));
++ AssertCompile(sizeof(Value) == sizeof(uint32_t));
++ continue;
++ }
++#elif ELF_MODE == 64
++ case R_X86_64_RELATIVE:
++ {
++ const Elf_Addr Value = paRels[iRel].r_addend + BaseAddr;
++ *(uint64_t *)pAddrW = (uint64_t)Value;
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_X86_64_RELATIVE Value=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, Value));
++ AssertCompile(sizeof(Value) == sizeof(uint64_t));
++ continue;
++ }
++#endif
+
+- Elf_Addr SymValue = 0;
+- if (pSym->st_shndx == SHN_UNDEF)
+- {
+- /* Try to resolve the symbol. */
+- const char *pszName = ELF_STR(pModElf, pSym->st_name);
+- RTUINTPTR ExtValue;
+- int rc = pfnGetImport(&pModElf->Core, "", pszName, ~0U, &ExtValue, pvUser);
+- AssertMsgRCReturn(rc, ("Failed to resolve '%s' rc=%Rrc\n", pszName, rc), rc);
+- SymValue = (Elf_Addr)ExtValue;
+- AssertMsgReturn((RTUINTPTR)SymValue == ExtValue, ("Symbol value overflowed! '%s'\n", pszName),
+- VERR_SYMBOL_VALUE_TOO_BIG);
+- Log2(("rtldrELF: #%-3d - UNDEF " FMT_ELF_ADDR " '%s'\n", iSym, SymValue, pszName));
+- }
+- else
+- {
+- AssertMsgReturn(pSym->st_shndx < pModElf->Ehdr.e_shnum || pSym->st_shndx == SHN_ABS, ("%#x\n", pSym->st_shndx),
+- VERR_LDRELF_INVALID_RELOCATION_OFFSET);
+-#if ELF_MODE == 64
+- SymValue = pSym->st_value;
++ /*
++ * R_XXX_NONE.
++ */
++#if ELF_MODE == 32
++ case R_386_NONE:
++#elif ELF_MODE == 64
++ case R_X86_64_NONE:
+ #endif
++ continue;
+ }
+
+-#if ELF_MODE == 64
+- /* Calc the value (indexes checked above; assumes SHN_UNDEF == 0). */
+- Elf_Addr Value;
+- if (pSym->st_shndx < pModElf->Ehdr.e_shnum)
+- Value = SymValue + offDelta;
+- else /* SHN_ABS: */
+- Value = SymValue + paRels[iRel].r_addend;
+-#endif
++ /*
++ * Validate and find the symbol, resolve undefined ones.
++ */
++ const Elf_Sym *pSym = NULL; /* shut up gcc */
++ Elf_Addr SymValue = 0; /* shut up gcc-4 */
++ int rc = RTLDRELF_NAME(SymbolExecDyn)(pModElf, BaseAddr, pfnGetImport, pvUser, ELF_R_SYM(paRels[iRel].r_info), &pSym, &SymValue);
++ if (RT_FAILURE(rc))
++ return rc;
+
+ /*
+ * Apply the fixup.
+ */
+- AssertMsgReturn(paRels[iRel].r_offset < cbSec, (FMT_ELF_ADDR " " FMT_ELF_SIZE "\n", paRels[iRel].r_offset, cbSec), VERR_LDRELF_INVALID_RELOCATION_OFFSET);
+-#if ELF_MODE == 32
+- const Elf_Addr *pAddrR = (const Elf_Addr *)(pu8SecBaseR + paRels[iRel].r_offset); /* Where to read the addend. */
+-#endif
+- Elf_Addr *pAddrW = (Elf_Addr *)(pu8SecBaseW + paRels[iRel].r_offset); /* Where to write the fixup. */
+ switch (ELF_R_TYPE(paRels[iRel].r_info))
+ {
+ #if ELF_MODE == 32
+ /*
++ * GOT/PLT.
++ */
++ case R_386_GLOB_DAT:
++ {
++ *(uint32_t *)pAddrW = (uint32_t)SymValue;
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_386_GLOB_DAT Value=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, SymValue));
++ AssertCompile(sizeof(SymValue) == sizeof(uint32_t));
++ break;
++ }
++
++ case R_386_JMP_SLOT:
++ {
++ *(uint32_t *)pAddrW = (uint32_t)SymValue;
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_386_JMP_SLOT Value=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, SymValue));
++ AssertCompile(sizeof(SymValue) == sizeof(uint32_t));
++ break;
++ }
++
++ /*
+ * Absolute addressing.
+ */
+ case R_386_32:
+@@ -322,7 +518,8 @@ static int RTLDRELF_NAME(RelocateSection
+ else
+ AssertFailedReturn(VERR_LDR_GENERAL_FAILURE); /** @todo SHN_COMMON */
+ *(uint32_t *)pAddrW = Value;
+- Log4((FMT_ELF_ADDR": R_386_32 Value=" FMT_ELF_ADDR "\n", SecAddr + paRels[iRel].r_offset + BaseAddr, Value));
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_386_32 Value=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, Value));
+ break;
+ }
+
+@@ -344,20 +541,42 @@ static int RTLDRELF_NAME(RelocateSection
+ }
+ else
+ AssertFailedReturn(VERR_LDR_GENERAL_FAILURE); /** @todo SHN_COMMON */
+- Log4((FMT_ELF_ADDR": R_386_PC32 Value=" FMT_ELF_ADDR "\n", SecAddr + paRels[iRel].r_offset + BaseAddr, Value));
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_386_PC32 Value=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, Value));
+ break;
+ }
+
+ #elif ELF_MODE == 64
++ /*
++ * GOT/PLT.
++ */
++ case R_X86_64_GLOB_DAT:
++ {
++ *(uint64_t *)pAddrW = (uint64_t)SymValue;
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_X86_64_GLOB_DAT Value=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, SymValue));
++ AssertCompile(sizeof(SymValue) == sizeof(uint64_t));
++ break;
++ }
++
++ case R_X86_64_JMP_SLOT:
++ {
++ *(uint64_t *)pAddrW = (uint64_t)SymValue;
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_X86_64_JMP_SLOT Value=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, SymValue));
++ AssertCompile(sizeof(SymValue) == sizeof(uint64_t));
++ break;
++ }
+
+ /*
+- * Absolute addressing
++ * Absolute addressing.
+ */
+ case R_X86_64_64:
+ {
++ const Elf_Addr Value = SymValue + paRels[iRel].r_addend;
+ *(uint64_t *)pAddrW = Value;
+- Log4((FMT_ELF_ADDR": R_X86_64_64 Value=" FMT_ELF_ADDR " SymValue=" FMT_ELF_ADDR "\n",
+- SecAddr + paRels[iRel].r_offset + BaseAddr, Value, SymValue));
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_X86_64_64 Value=" FMT_ELF_ADDR " SymValue=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, Value, SymValue));
+ break;
+ }
+
+@@ -366,9 +585,10 @@ static int RTLDRELF_NAME(RelocateSection
+ */
+ case R_X86_64_32:
+ {
++ const Elf_Addr Value = SymValue + paRels[iRel].r_addend;
+ *(uint32_t *)pAddrW = (uint32_t)Value;
+- Log4((FMT_ELF_ADDR": R_X86_64_32 Value=" FMT_ELF_ADDR " SymValue=" FMT_ELF_ADDR "\n",
+- SecAddr + paRels[iRel].r_offset + BaseAddr, Value, SymValue));
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_X86_64_32 Value=" FMT_ELF_ADDR " SymValue=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, Value, SymValue));
+ AssertMsgReturn((Elf_Addr)*(uint32_t *)pAddrW == SymValue, ("Value=" FMT_ELF_ADDR "\n", SymValue),
+ VERR_SYMBOL_VALUE_TOO_BIG);
+ break;
+@@ -379,9 +599,10 @@ static int RTLDRELF_NAME(RelocateSection
+ */
+ case R_X86_64_32S:
+ {
++ const Elf_Addr Value = SymValue + paRels[iRel].r_addend;
+ *(int32_t *)pAddrW = (int32_t)Value;
+- Log4((FMT_ELF_ADDR": R_X86_64_32S Value=" FMT_ELF_ADDR " SymValue=" FMT_ELF_ADDR "\n",
+- SecAddr + paRels[iRel].r_offset + BaseAddr, Value, SymValue));
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_X86_64_32S Value=" FMT_ELF_ADDR " SymValue=" FMT_ELF_ADDR "\n",
++ SecAddr + paRels[iRel].r_offset + BaseAddr, paRels[iRel].r_offset, Value, SymValue));
+ AssertMsgReturn((Elf_Addr)*(int32_t *)pAddrW == Value, ("Value=" FMT_ELF_ADDR "\n", Value), VERR_SYMBOL_VALUE_TOO_BIG); /** @todo check the sign-extending here. */
+ break;
+ }
+@@ -390,18 +611,17 @@ static int RTLDRELF_NAME(RelocateSection
+ * PC relative addressing.
+ */
+ case R_X86_64_PC32:
+- case R_X86_64_PLT32: /* binutils commit 451875b4f976a527395e9303224c7881b65e12ed feature/regression. */
+ {
+- const Elf_Addr SourceAddr = SecAddr + paRels[iRel].r_offset + BaseAddr; /* Where the source really is. */
+- Value -= SourceAddr;
++ const Elf_Addr SourceAddr = SecAddr + paRels[iRel].r_offset + BaseAddr; /* Where the source really is. */
++ const Elf_Addr Value = SymValue + paRels[iRel].r_addend - SourceAddr;
+ *(int32_t *)pAddrW = (int32_t)Value;
+- Log4((FMT_ELF_ADDR": R_X86_64_PC32 Value=" FMT_ELF_ADDR " SymValue=" FMT_ELF_ADDR "\n",
+- SourceAddr, Value, SymValue));
++ Log4((FMT_ELF_ADDR "/" FMT_ELF_ADDR7 ": R_X86_64_PC32 Value=" FMT_ELF_ADDR " SymValue=" FMT_ELF_ADDR "\n",
++ SourceAddr, paRels[iRel].r_offset, Value, SymValue));
+ AssertMsgReturn((Elf_Addr)*(int32_t *)pAddrW == Value, ("Value=" FMT_ELF_ADDR "\n", Value), VERR_SYMBOL_VALUE_TOO_BIG); /** @todo check the sign-extending here. */
+ break;
+ }
+-#endif
+
++#endif
+ default:
+ AssertMsgFailed(("Unknown relocation type: %d (iRel=%d iRelMax=%d)\n",
+ ELF_R_TYPE(paRels[iRel].r_info), iRel, iRelMax));
+@@ -442,19 +662,13 @@ static int RTLDRELF_NAME(Symbol)(PRTLDRM
+ /*
+ * Validate and find the symbol.
+ */
+- if (iSym >= pModElf->cSyms)
+- {
+- AssertMsgFailed(("iSym=%d is an invalid symbol index!\n", iSym));
+- return VERR_LDRELF_INVALID_SYMBOL_INDEX;
+- }
+- const Elf_Sym *pSym = &pModElf->paSyms[iSym];
++ AssertMsgReturn(iSym < pModElf->Rel.cSyms, ("iSym=%d is an invalid symbol index!\n", iSym), VERR_LDRELF_INVALID_SYMBOL_INDEX);
++ const Elf_Sym *pSym = &pModElf->Rel.paSyms[iSym];
+ *ppSym = pSym;
+
+- if (pSym->st_name >= pModElf->cbStr)
+- {
+- AssertMsgFailed(("iSym=%d st_name=%d str sh_size=%d\n", iSym, pSym->st_name, pModElf->cbStr));
+- return VERR_LDRELF_INVALID_SYMBOL_NAME_OFFSET;
+- }
++ AssertMsgReturn(pSym->st_name < pModElf->Rel.cbStr,
++ ("iSym=%d st_name=%d str sh_size=%d\n", iSym, pSym->st_name, pModElf->Rel.cbStr),
++ VERR_LDRELF_INVALID_SYMBOL_NAME_OFFSET);
+ const char *pszName = ELF_STR(pModElf, pSym->st_name);
+
+ /*
+@@ -469,7 +683,7 @@ static int RTLDRELF_NAME(Symbol)(PRTLDRM
+ * Undefined symbol, needs resolving.
+ *
+ * Since ELF has no generic concept of importing from specific module (the OS/2 ELF format
+- * has but that's a OS extension and only applies to programs and dlls), we'll have to ask
++ * has but that's an OS extension and only applies to programs and dlls), we'll have to ask
+ * the resolver callback to do a global search.
+ */
+ case SHN_UNDEF:
+@@ -477,17 +691,12 @@ static int RTLDRELF_NAME(Symbol)(PRTLDRM
+ /* Try to resolve the symbol. */
+ RTUINTPTR Value;
+ int rc = pfnGetImport(&pModElf->Core, "", pszName, ~0U, &Value, pvUser);
+- if (RT_FAILURE(rc))
+- {
+- AssertMsgFailed(("Failed to resolve '%s' rc=%Rrc\n", pszName, rc));
+- return rc;
+- }
++ AssertMsgRCReturn(rc, ("Failed to resolve '%s' (iSym=" FMT_ELF_SIZE " rc=%Rrc\n", pszName, iSym, rc), rc);
+ *pSymValue = (Elf_Addr)Value;
+- if ((RTUINTPTR)*pSymValue != Value)
+- {
+- AssertMsgFailed(("Symbol value overflowed! '%s'\n", pszName));
+- return VERR_SYMBOL_VALUE_TOO_BIG;
+- }
++
++ AssertMsgReturn((RTUINTPTR)*pSymValue == Value,
++ ("Symbol value overflowed! '%s' (iSym=" FMT_ELF_SIZE ")\n", pszName, iSym),
++ VERR_SYMBOL_VALUE_TOO_BIG);
+
+ Log2(("rtldrELF: #%-3d - UNDEF " FMT_ELF_ADDR " '%s'\n", iSym, *pSymValue, pszName));
+ break;
+@@ -536,9 +745,9 @@ static int RTLDRELF_NAME(Symbol)(PRTLDRM
+ * @param pvRelocs Pointer to where we read the relocations from.
+ * @param cbRelocs Size of the relocations.
+ */
+-static int RTLDRELF_NAME(RelocateSection)(PRTLDRMODELF pModElf, Elf_Addr BaseAddr, PFNRTLDRIMPORT pfnGetImport, void *pvUser,
+- const Elf_Addr SecAddr, Elf_Size cbSec, const uint8_t *pu8SecBaseR, uint8_t *pu8SecBaseW,
+- const void *pvRelocs, Elf_Size cbRelocs)
++static int RTLDRELF_NAME(RelocateSectionRel)(PRTLDRMODELF pModElf, Elf_Addr BaseAddr, PFNRTLDRIMPORT pfnGetImport, void *pvUser,
++ const Elf_Addr SecAddr, Elf_Size cbSec, const uint8_t *pu8SecBaseR,
++ uint8_t *pu8SecBaseW, const void *pvRelocs, Elf_Size cbRelocs)
+ {
+ #if ELF_MODE != 32
+ NOREF(pu8SecBaseR);
+@@ -702,6 +911,18 @@ static DECLCALLBACK(int) RTLDRELF_NAME(C
+ pModElf->paShdrs = NULL;
+ }
+
++ if (pModElf->paPhdrs)
++ {
++ RTMemFree(pModElf->paPhdrs);
++ pModElf->paPhdrs = NULL;
++ }
++
++ if (pModElf->paDynamic)
++ {
++ RTMemFree(pModElf->paDynamic);
++ pModElf->paDynamic = NULL;
++ }
++
+ if (pModElf->pvBits)
+ {
+ pModElf->Core.pReader->pfnUnmap(pModElf->Core.pReader, pModElf->pvBits);
+@@ -721,9 +942,9 @@ static DECLCALLBACK(int) RTLDRELF_NAME(D
+ }
+
+
+-/** @copydoc RTLDROPS::EnumSymbols */
+-static DECLCALLBACK(int) RTLDRELF_NAME(EnumSymbols)(PRTLDRMODINTERNAL pMod, unsigned fFlags, const void *pvBits, RTUINTPTR BaseAddress,
+- PFNRTLDRENUMSYMS pfnCallback, void *pvUser)
++/** @copydoc RTLDROPS::pfnEnumSymbols */
++static DECLCALLBACK(int) RTLDRELF_NAME(EnumSymbols)(PRTLDRMODINTERNAL pMod, unsigned fFlags, const void *pvBits,
++ RTUINTPTR BaseAddress, PFNRTLDRENUMSYMS pfnCallback, void *pvUser)
+ {
+ PRTLDRMODELF pModElf = (PRTLDRMODELF)pMod;
+ NOREF(pvBits);
+@@ -744,8 +965,20 @@ static DECLCALLBACK(int) RTLDRELF_NAME(E
+ /*
+ * Enumerate the symbol table.
+ */
+- const Elf_Sym *paSyms = pModElf->paSyms;
+- unsigned cSyms = pModElf->cSyms;
++ const Elf_Sym *paSyms = pModElf->Rel.paSyms;
++ unsigned cSyms = pModElf->Rel.cSyms;
++ const char *pszzStr = pModElf->Rel.pStr;
++ unsigned cbStr = pModElf->Rel.cbStr;
++ if ( ( !(fFlags & RTLDR_ENUM_SYMBOL_FLAGS_ALL)
++ && pModElf->Dyn.cSyms > 0)
++ || cSyms == 0)
++ {
++ paSyms = pModElf->Dyn.paSyms;
++ cSyms = pModElf->Dyn.cSyms;
++ pszzStr = pModElf->Dyn.pStr;
++ cbStr = pModElf->Dyn.cbStr;
++ }
++
+ for (unsigned iSym = 1; iSym < cSyms; iSym++)
+ {
+ /*
+@@ -774,22 +1007,21 @@ static DECLCALLBACK(int) RTLDRELF_NAME(E
+ return VERR_BAD_EXE_FORMAT;
+ }
+
+- AssertMsgReturn(paSyms[iSym].st_name < pModElf->cbStr,
++ AssertMsgReturn(paSyms[iSym].st_name < cbStr,
+ ("String outside string table! iSym=%d paSyms[iSym].st_name=%#x\n", iSym, paSyms[iSym].st_name),
+ VERR_LDRELF_INVALID_SYMBOL_NAME_OFFSET);
++ const char * const pszName = pszzStr + paSyms[iSym].st_name;
+
+- const char *pszName = ELF_STR(pModElf, paSyms[iSym].st_name);
+ /* String termination was already checked when the string table was mapped. */
+- if ( (pszName && *pszName)
++ if ( *pszName != '\0'
+ && ( (fFlags & RTLDR_ENUM_SYMBOL_FLAGS_ALL)
+- || ELF_ST_BIND(paSyms[iSym].st_info) == STB_GLOBAL)
+- )
++ || ELF_ST_BIND(paSyms[iSym].st_info) == STB_GLOBAL) )
+ {
+ /*
+ * Call back.
+ */
+ AssertMsgReturn(Value == (RTUINTPTR)Value, (FMT_ELF_ADDR "\n", Value), VERR_SYMBOL_VALUE_TOO_BIG);
+- rc = pfnCallback(pMod, pszName, ~0U, (RTUINTPTR)Value, pvUser);
++ rc = pfnCallback(pMod, pszName, iSym, (RTUINTPTR)Value, pvUser);
+ if (rc)
+ return rc;
+ }
+@@ -820,13 +1052,11 @@ static DECLCALLBACK(int) RTLDRELF_NAME(G
+ switch (pModElf->Ehdr.e_type)
+ {
+ case ET_REL:
++ case ET_DYN:
+ break;
+ case ET_EXEC:
+ Log(("RTLdrELF: %s: Executable images are not supported yet!\n", pModElf->Core.pReader->pfnLogName(pModElf->Core.pReader)));
+ return VERR_LDRELF_EXEC;
+- case ET_DYN:
+- Log(("RTLdrELF: %s: Dynamic images are not supported yet!\n", pModElf->Core.pReader->pfnLogName(pModElf->Core.pReader)));
+- return VERR_LDRELF_DYN;
+ default: AssertFailedReturn(VERR_BAD_EXE_FORMAT);
+ }
+
+@@ -885,13 +1115,11 @@ static DECLCALLBACK(int) RTLDRELF_NAME(R
+ switch (pModElf->Ehdr.e_type)
+ {
+ case ET_REL:
++ case ET_DYN:
+ break;
+ case ET_EXEC:
+ Log(("RTLdrELF: %s: Executable images are not supported yet!\n", pszLogName));
+ return VERR_LDRELF_EXEC;
+- case ET_DYN:
+- Log(("RTLdrELF: %s: Dynamic images are not supported yet!\n", pszLogName));
+- return VERR_LDRELF_DYN;
+ default: AssertFailedReturn(VERR_BAD_EXE_FORMAT);
+ }
+
+@@ -910,8 +1138,9 @@ static DECLCALLBACK(int) RTLDRELF_NAME(R
+
+ /*
+ * Iterate the sections looking for interesting SHT_REL[A] sections.
+- * SHT_REL[A] sections have the section index of the section they contain fixups
+- * for in the sh_info member.
++ *
++ * In ET_REL files the SHT_REL[A] sections have the section index of
++ * the section they contain fixups for in the sh_info member.
+ */
+ const Elf_Shdr *paShdrs = pModElf->paShdrs;
+ Log2(("rtLdrElf: %s: Fixing up image\n", pszLogName));
+@@ -928,36 +1157,37 @@ static DECLCALLBACK(int) RTLDRELF_NAME(R
+ if (pShdrRel->sh_type != SHT_RELA)
+ #endif
+ continue;
+- if (pShdrRel->sh_info >= pModElf->Ehdr.e_shnum)
+- continue;
+- const Elf_Shdr *pShdr = &paShdrs[pShdrRel->sh_info]; /* the section to fixup. */
+- if (!(pShdr->sh_flags & SHF_ALLOC))
+- continue;
+-
+- /*
+- * Relocate the section.
+- */
+- Log2(("rtldrELF: %s: Relocation records for #%d [%s] (sh_info=%d sh_link=%d) found in #%d [%s] (sh_info=%d sh_link=%d)\n",
+- pszLogName, (int)pShdrRel->sh_info, ELF_SH_STR(pModElf, pShdr->sh_name), (int)pShdr->sh_info, (int)pShdr->sh_link,
+- iShdr, ELF_SH_STR(pModElf, pShdrRel->sh_name), (int)pShdrRel->sh_info, (int)pShdrRel->sh_link));
+-
+- /** @todo Make RelocateSection a function pointer so we can select the one corresponding to the machine when opening the image. */
+ if (pModElf->Ehdr.e_type == ET_REL)
+- rc = RTLDRELF_NAME(RelocateSection)(pModElf, BaseAddr, pfnGetImport, pvUser,
+- pShdr->sh_addr,
+- pShdr->sh_size,
+- (const uint8_t *)pModElf->pvBits + pShdr->sh_offset,
+- (uint8_t *)pvBits + pShdr->sh_addr,
+- (const uint8_t *)pModElf->pvBits + pShdrRel->sh_offset,
+- pShdrRel->sh_size);
++ {
++ if (pShdrRel->sh_info >= pModElf->Ehdr.e_shnum)
++ continue;
++ const Elf_Shdr *pShdr = &paShdrs[pShdrRel->sh_info]; /* the section to fixup. */
++ if (!(pShdr->sh_flags & SHF_ALLOC))
++ continue;
++
++ /*
++ * Relocate the section.
++ */
++ Log2(("rtldrELF: %s: Relocation records for #%d [%s] (sh_info=%d sh_link=%d) found in #%d [%s] (sh_info=%d sh_link=%d)\n",
++ pszLogName, (int)pShdrRel->sh_info, ELF_SH_STR(pModElf, pShdr->sh_name), (int)pShdr->sh_info, (int)pShdr->sh_link,
++ iShdr, ELF_SH_STR(pModElf, pShdrRel->sh_name), (int)pShdrRel->sh_info, (int)pShdrRel->sh_link));
++
++ rc = RTLDRELF_NAME(RelocateSectionRel)(pModElf, BaseAddr, pfnGetImport, pvUser,
++ pShdr->sh_addr,
++ pShdr->sh_size,
++ (const uint8_t *)pModElf->pvBits + pShdr->sh_offset,
++ (uint8_t *)pvBits + pShdr->sh_addr,
++ (const uint8_t *)pModElf->pvBits + pShdrRel->sh_offset,
++ pShdrRel->sh_size);
++ }
+ else
+ rc = RTLDRELF_NAME(RelocateSectionExecDyn)(pModElf, BaseAddr, pfnGetImport, pvUser,
+- pShdr->sh_addr,
+- pShdr->sh_size,
+- (const uint8_t *)pModElf->pvBits + pShdr->sh_offset,
+- (uint8_t *)pvBits + pShdr->sh_addr,
++ 0, (Elf_Size)pModElf->cbImage,
++ (const uint8_t *)pModElf->pvBits /** @todo file offset ?? */,
++ (uint8_t *)pvBits,
+ (const uint8_t *)pModElf->pvBits + pShdrRel->sh_offset,
+ pShdrRel->sh_size);
++
+ if (RT_FAILURE(rc))
+ return rc;
+ }
+@@ -1016,11 +1246,20 @@ static DECLCALLBACK(int) RTLDRELF_NAME(G
+ /*
+ * Calc all kinds of pointers before we start iterating the symbol table.
+ */
+- const Elf_Sym *paSyms = pModElf->paSyms;
+- unsigned cSyms = pModElf->cSyms;
++ const Elf_Sym *paSyms = pModElf->Rel.paSyms;
++ unsigned cSyms = pModElf->Rel.cSyms;
++ const char *pszzStr = pModElf->Rel.pStr;
++ unsigned cbStr = pModElf->Rel.cbStr;
++ if (pModElf->Dyn.cSyms > 0)
++ {
++ paSyms = pModElf->Dyn.paSyms;
++ cSyms = pModElf->Dyn.cSyms;
++ pszzStr = pModElf->Dyn.pStr;
++ cbStr = pModElf->Dyn.cbStr;
++ }
++
+ if (iOrdinal == UINT32_MAX)
+ {
+- const char *pStr = pModElf->pStr;
+ for (unsigned iSym = 1; iSym < cSyms; iSym++)
+ {
+ /* Undefined symbols are not exports, they are imports. */
+@@ -1029,18 +1268,13 @@ static DECLCALLBACK(int) RTLDRELF_NAME(G
+ || ELF_ST_BIND(paSyms[iSym].st_info) == STB_WEAK))
+ {
+ /* Validate the name string and try match with it. */
+- if (paSyms[iSym].st_name < pModElf->cbStr)
+- {
+- if (!strcmp(pszSymbol, pStr + paSyms[iSym].st_name))
+- {
+- /* matched! */
+- return RTLDRELF_NAME(ReturnSymbol)(pModElf, &paSyms[iSym], uBaseAddr, pValue);
+- }
+- }
+- else
++ AssertMsgReturn(paSyms[iSym].st_name < cbStr,
++ ("String outside string table! iSym=%d paSyms[iSym].st_name=%#x\n", iSym, paSyms[iSym].st_name),
++ VERR_LDRELF_INVALID_SYMBOL_NAME_OFFSET);
++ if (!strcmp(pszSymbol, pszzStr + paSyms[iSym].st_name))
+ {
+- AssertMsgFailed(("String outside string table! iSym=%d paSyms[iSym].st_name=%#x\n", iSym, paSyms[iSym].st_name));
+- return VERR_LDRELF_INVALID_SYMBOL_NAME_OFFSET;
++ /* matched! */
++ return RTLDRELF_NAME(ReturnSymbol)(pModElf, &paSyms[iSym], uBaseAddr, pValue);
+ }
+ }
+ }
+@@ -1127,23 +1361,47 @@ static DECLCALLBACK(int) RTLDRELF_NAME(E
+
+
+ /**
+- * Helper that locates the first allocated section.
++ * Locate the next allocated section by RVA (sh_addr).
++ *
++ * This is a helper for EnumSegments and SegOffsetToRva.
+ *
+ * @returns Pointer to the section header if found, NULL if none.
+- * @param pShdr The section header to start searching at.
+- * @param cLeft The number of section headers left to search. Can be 0.
++ * @param pModElf The module instance.
++ * @param iShdrCur The current section header.
+ */
+-static const Elf_Shdr *RTLDRELF_NAME(GetFirstAllocatedSection)(const Elf_Shdr *pShdr, unsigned cLeft)
++static const Elf_Shdr *RTLDRELF_NAME(GetNextAllocatedSection)(PRTLDRMODELF pModElf, unsigned iShdrCur)
+ {
+- while (cLeft-- > 0)
++ unsigned const cShdrs = pModElf->Ehdr.e_shnum;
++ const Elf_Shdr * const paShdrs = pModElf->paShdrs;
++ if (pModElf->fShdrInOrder)
++ {
++ for (unsigned iShdr = iShdrCur + 1; iShdr < cShdrs; iShdr++)
++ if (paShdrs[iShdr].sh_flags & SHF_ALLOC)
++ return &paShdrs[iShdr];
++ }
++ else
+ {
+- if (pShdr->sh_flags & SHF_ALLOC)
+- return pShdr;
+- pShdr++;
++ Elf_Addr const uEndCur = paShdrs[iShdrCur].sh_addr + paShdrs[iShdrCur].sh_size;
++ Elf_Addr offBest = ~(Elf_Addr)0;
++ unsigned iBest = cShdrs;
++ for (unsigned iShdr = pModElf->iFirstSect; iShdr < cShdrs; iShdr++)
++ if ((paShdrs[iShdr].sh_flags & SHF_ALLOC) && iShdr != iShdrCur)
++ {
++ Elf_Addr const offDelta = paShdrs[iShdr].sh_addr - uEndCur;
++ if ( offDelta < offBest
++ && paShdrs[iShdr].sh_addr >= uEndCur)
++ {
++ offBest = offDelta;
++ iBest = iShdr;
++ }
++ }
++ if (iBest < cShdrs)
++ return &paShdrs[iBest];
+ }
+ return NULL;
+ }
+
++
+ /** @copydoc RTLDROPS::pfnEnumSegments. */
+ static DECLCALLBACK(int) RTLDRELF_NAME(EnumSegments)(PRTLDRMODINTERNAL pMod, PFNRTLDRENUMSEGS pfnCallback, void *pvUser)
+ {
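
/*
 * A minimal sketch of the out-of-order search that GetNextAllocatedSection above performs:
 * among the allocated sections, pick the one whose address is closest to, but not below, the
 * end of the current section.  The structure and field names are simplified stand-ins for the
 * Elf_Shdr fields, not the real definitions.
 */
struct SketchSect { unsigned long uAddr, cb; int fAlloc; };

static int sketchNextAllocated(const struct SketchSect *paSects, unsigned cSects, unsigned iCur)
{
    unsigned long const uEndCur = paSects[iCur].uAddr + paSects[iCur].cb;
    unsigned long       offBest = ~0UL;
    unsigned            iBest   = cSects;                       /* "nothing found" sentinel */
    for (unsigned i = 0; i < cSects; i++)
        if (paSects[i].fAlloc && i != iCur && paSects[i].uAddr >= uEndCur)
        {
            unsigned long const offDelta = paSects[i].uAddr - uEndCur;
            if (offDelta < offBest)
            {
                offBest = offDelta;
                iBest   = i;
            }
        }
    return iBest < cSects ? (int)iBest : -1;                    /* -1 if no later allocated section */
}
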
+@@ -1163,15 +1421,23 @@ static DECLCALLBACK(int) RTLDRELF_NAME(E
+ Elf_Addr uPrevMappedRva = 0;
+ const Elf_Shdr *paShdrs = pModElf->paShdrs;
+ const Elf_Shdr *paOrgShdrs = pModElf->paOrgShdrs;
+- for (unsigned iShdr = 1; iShdr < pModElf->Ehdr.e_shnum; iShdr++)
++ for (unsigned iShdr = pModElf->iFirstSect; iShdr < pModElf->Ehdr.e_shnum; iShdr++)
+ {
+ RTLDRSEG Seg;
+- Seg.pszName = ELF_SH_STR(pModElf, paShdrs[iShdr].sh_name);
+- Seg.cchName = (uint32_t)strlen(Seg.pszName);
+- if (Seg.cchName == 0)
++ if (iShdr != 0)
++ {
++ Seg.pszName = ELF_SH_STR(pModElf, paShdrs[iShdr].sh_name);
++ Seg.cchName = (uint32_t)strlen(Seg.pszName);
++ if (Seg.cchName == 0)
++ {
++ Seg.pszName = szName;
++ Seg.cchName = (uint32_t)RTStrPrintf(szName, sizeof(szName), "UnamedSect%02u", iShdr);
++ }
++ }
++ else
+ {
+- Seg.pszName = szName;
+- Seg.cchName = (uint32_t)RTStrPrintf(szName, sizeof(szName), "UnamedSect%02u", iShdr);
++ Seg.pszName = ".elf.headers";
++ Seg.cchName = 12;
+ }
+ Seg.SelFlat = 0;
+ Seg.Sel16bit = 0;
+@@ -1187,14 +1453,11 @@ static DECLCALLBACK(int) RTLDRELF_NAME(E
+ {
+ Seg.LinkAddress = paOrgShdrs[iShdr].sh_addr;
+ Seg.RVA = paShdrs[iShdr].sh_addr;
+- const Elf_Shdr *pShdr2 = RTLDRELF_NAME(GetFirstAllocatedSection)(&paShdrs[iShdr + 1],
+- pModElf->Ehdr.e_shnum - iShdr - 1);
+- if ( pShdr2
+- && pShdr2->sh_addr >= paShdrs[iShdr].sh_addr
+- && Seg.RVA >= uPrevMappedRva)
++ const Elf_Shdr *pShdr2 = RTLDRELF_NAME(GetNextAllocatedSection)(pModElf, iShdr);
++ if (pShdr2)
+ Seg.cbMapped = pShdr2->sh_addr - paShdrs[iShdr].sh_addr;
+ else
+- Seg.cbMapped = RT_MAX(paShdrs[iShdr].sh_size, paShdrs[iShdr].sh_addralign);
++ Seg.cbMapped = pModElf->cbImage - paShdrs[iShdr].sh_addr;
+ uPrevMappedRva = Seg.RVA;
+ }
+ else
+@@ -1230,10 +1493,11 @@ static DECLCALLBACK(int) RTLDRELF_NAME(L
+ PRTLDRMODELF pModElf = (PRTLDRMODELF)pMod;
+
+ const Elf_Shdr *pShdrEnd = NULL;
+- unsigned cLeft = pModElf->Ehdr.e_shnum - 1;
+- const Elf_Shdr *pShdr = &pModElf->paOrgShdrs[cLeft];
++ unsigned cLeft = pModElf->Ehdr.e_shnum - pModElf->iFirstSect;
++ const Elf_Shdr *pShdr = &pModElf->paOrgShdrs[pModElf->Ehdr.e_shnum];
+ while (cLeft-- > 0)
+ {
++ pShdr--;
+ if (pShdr->sh_flags & SHF_ALLOC)
+ {
+ RTLDRADDR offSeg = LinkAddress - pShdr->sh_addr;
+@@ -1246,13 +1510,12 @@ static DECLCALLBACK(int) RTLDRELF_NAME(L
+ if (offSeg == pShdr->sh_size)
+ pShdrEnd = pShdr;
+ }
+- pShdr--;
+ }
+
+ if (pShdrEnd)
+ {
+ *poffSeg = pShdrEnd->sh_size;
+- *piSeg = pShdrEnd - pModElf->paOrgShdrs - 1;
++ *piSeg = pShdrEnd - pModElf->paOrgShdrs - pModElf->iFirstSect;
+ return VINF_SUCCESS;
+ }
+
+@@ -1268,7 +1531,7 @@ static DECLCALLBACK(int) RTLDRELF_NAME(L
+ RTLDRADDR offSeg;
+ int rc = RTLDRELF_NAME(LinkAddressToSegOffset)(pMod, LinkAddress, &iSeg, &offSeg);
+ if (RT_SUCCESS(rc))
+- *pRva = pModElf->paShdrs[iSeg + 1].sh_addr + offSeg;
++ *pRva = pModElf->paShdrs[iSeg + pModElf->iFirstSect].sh_addr + offSeg;
+ return rc;
+ }
+
+@@ -1278,14 +1541,13 @@ static DECLCALLBACK(int) RTLDRELF_NAME(S
+ PRTLDRADDR pRva)
+ {
+ PRTLDRMODELF pModElf = (PRTLDRMODELF)pMod;
+- if (iSeg >= pModElf->Ehdr.e_shnum - 1U)
++ if (iSeg >= pModElf->Ehdr.e_shnum - pModElf->iFirstSect)
+ return VERR_LDR_INVALID_SEG_OFFSET;
+
+- iSeg++; /* skip section 0 */
++ iSeg += pModElf->iFirstSect; /* skip section 0 if not used */
+ if (offSeg > pModElf->paShdrs[iSeg].sh_size)
+ {
+- const Elf_Shdr *pShdr2 = RTLDRELF_NAME(GetFirstAllocatedSection)(&pModElf->paShdrs[iSeg + 1],
+- pModElf->Ehdr.e_shnum - iSeg - 1);
++ const Elf_Shdr *pShdr2 = RTLDRELF_NAME(GetNextAllocatedSection)(pModElf, iSeg);
+ if ( !pShdr2
+ || offSeg > (pShdr2->sh_addr - pModElf->paShdrs[iSeg].sh_addr))
+ return VERR_LDR_INVALID_SEG_OFFSET;
+@@ -1303,13 +1565,13 @@ static DECLCALLBACK(int) RTLDRELF_NAME(S
+ static DECLCALLBACK(int) RTLDRELF_NAME(RvaToSegOffset)(PRTLDRMODINTERNAL pMod, RTLDRADDR Rva,
+ uint32_t *piSeg, PRTLDRADDR poffSeg)
+ {
+- PRTLDRMODELF pModElf = (PRTLDRMODELF)pMod;
+-
++ PRTLDRMODELF pModElf = (PRTLDRMODELF)pMod;
+ Elf_Addr PrevAddr = 0;
+- unsigned cLeft = pModElf->Ehdr.e_shnum - 1;
+- const Elf_Shdr *pShdr = &pModElf->paShdrs[cLeft];
++ unsigned cLeft = pModElf->Ehdr.e_shnum - pModElf->iFirstSect;
++ const Elf_Shdr *pShdr = &pModElf->paShdrs[pModElf->Ehdr.e_shnum];
+ while (cLeft-- > 0)
+ {
++ pShdr--;
+ if (pShdr->sh_flags & SHF_ALLOC)
+ {
+ Elf_Addr cbSeg = PrevAddr ? PrevAddr - pShdr->sh_addr : pShdr->sh_size;
+@@ -1322,7 +1584,6 @@ static DECLCALLBACK(int) RTLDRELF_NAME(R
+ }
+ PrevAddr = pShdr->sh_addr;
+ }
+- pShdr--;
+ }
+
+ return VERR_LDR_INVALID_RVA;
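
/*
 * A minimal sketch of the reverse walk used by RvaToSegOffset above: allocated sections are
 * visited from the highest address downwards, and each one is treated as covering the gap up to
 * the start of the previously visited (higher) section.  Types are simplified stand-ins and the
 * exact boundary handling in the real code may differ slightly.
 */
struct SketchSeg { unsigned long uAddr, cb; int fAlloc; };

static int sketchRvaToSegOffset(const struct SketchSeg *paSegs, unsigned cSegs, unsigned long uRva,
                                unsigned *piSeg, unsigned long *poffSeg)
{
    unsigned long uPrevAddr = 0;
    for (unsigned i = cSegs; i-- > 0; )
        if (paSegs[i].fAlloc)
        {
            unsigned long const cbSeg = uPrevAddr ? uPrevAddr - paSegs[i].uAddr : paSegs[i].cb;
            if (uRva - paSegs[i].uAddr < cbSeg)   /* unsigned wrap-around rejects RVAs below uAddr */
            {
                *piSeg   = i;
                *poffSeg = uRva - paSegs[i].uAddr;
                return 0;
            }
            uPrevAddr = paSegs[i].uAddr;
        }
    return -1;                                    /* RVA not inside any allocated segment */
}
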
+@@ -1413,14 +1674,14 @@ static DECLCALLBACK(int) RTLDRELF_NAME(R
+ * Apply the relocations.
+ */
+ if (pThis->Ehdr.e_type == ET_REL)
+- rc = RTLDRELF_NAME(RelocateSection)(pThis, pThis->LinkAddress,
+- RTLDRELF_NAME(GetImportStubCallback), NULL /*pvUser*/,
+- pThis->paShdrs[iDbgInfo].sh_addr,
+- pThis->paShdrs[iDbgInfo].sh_size,
+- (const uint8_t *)pvBuf,
+- (uint8_t *)pvBuf,
+- pbRelocs,
+- pThis->paShdrs[iRelocs].sh_size);
++ rc = RTLDRELF_NAME(RelocateSectionRel)(pThis, pThis->LinkAddress,
++ RTLDRELF_NAME(GetImportStubCallback), NULL /*pvUser*/,
++ pThis->paShdrs[iDbgInfo].sh_addr,
++ pThis->paShdrs[iDbgInfo].sh_size,
++ (const uint8_t *)pvBuf,
++ (uint8_t *)pvBuf,
++ pbRelocs,
++ pThis->paShdrs[iRelocs].sh_size);
+ else
+ rc = RTLDRELF_NAME(RelocateSectionExecDyn)(pThis, pThis->LinkAddress,
+ RTLDRELF_NAME(GetImportStubCallback), NULL /*pvUser*/,
+@@ -1562,11 +1823,13 @@ static RTLDROPS RTLDRELF_MID(s_rtldrElf,
+ *
+ * @returns iprt status code.
+ * @param pEhdr Pointer to the ELF header.
+- * @param pszLogName The log name.
+ * @param cbRawImage The size of the raw image.
++ * @param pszLogName The log name.
++ * @param penmArch Where to return the architecture.
++ * @param pErrInfo Where to return extended error info. Optional.
+ */
+-static int RTLDRELF_NAME(ValidateElfHeader)(const Elf_Ehdr *pEhdr, const char *pszLogName, uint64_t cbRawImage,
+- PRTLDRARCH penmArch)
++static int RTLDRELF_NAME(ValidateElfHeader)(const Elf_Ehdr *pEhdr, uint64_t cbRawImage, const char *pszLogName,
++ PRTLDRARCH penmArch, PRTERRINFO pErrInfo)
+ {
+ Log3(("RTLdrELF: e_ident: %.*Rhxs\n"
+ "RTLdrELF: e_type: " FMT_ELF_HALF "\n"
+@@ -1588,48 +1851,31 @@ static int RTLDRELF_NAME(ValidateElfHead
+ if ( pEhdr->e_ident[EI_MAG0] != ELFMAG0
+ || pEhdr->e_ident[EI_MAG1] != ELFMAG1
+ || pEhdr->e_ident[EI_MAG2] != ELFMAG2
+- || pEhdr->e_ident[EI_MAG3] != ELFMAG3
+- )
+- {
+- Log(("RTLdrELF: %s: Invalid ELF magic (%.*Rhxs)\n", pszLogName, sizeof(pEhdr->e_ident), pEhdr->e_ident)); NOREF(pszLogName);
+- return VERR_BAD_EXE_FORMAT;
+- }
++ || pEhdr->e_ident[EI_MAG3] != ELFMAG3)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Invalid ELF magic (%.*Rhxs)", pszLogName, sizeof(pEhdr->e_ident), pEhdr->e_ident);
+ if (pEhdr->e_ident[EI_CLASS] != RTLDRELF_SUFF(ELFCLASS))
+- {
+- Log(("RTLdrELF: %s: Invalid ELF class (%.*Rhxs)\n", pszLogName, sizeof(pEhdr->e_ident), pEhdr->e_ident));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Invalid ELF class (%.*Rhxs)", pszLogName, sizeof(pEhdr->e_ident), pEhdr->e_ident);
+ if (pEhdr->e_ident[EI_DATA] != ELFDATA2LSB)
+- {
+- Log(("RTLdrELF: %s: ELF endian %x is unsupported\n", pszLogName, pEhdr->e_ident[EI_DATA]));
+- return VERR_LDRELF_ODD_ENDIAN;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRELF_ODD_ENDIAN,
++ "%s: ELF endian %x is unsupported", pszLogName, pEhdr->e_ident[EI_DATA]);
+ if (pEhdr->e_version != EV_CURRENT)
+- {
+- Log(("RTLdrELF: %s: ELF version %x is unsupported\n", pszLogName, pEhdr->e_version));
+- return VERR_LDRELF_VERSION;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRELF_VERSION,
++ "%s: ELF version %x is unsupported", pszLogName, pEhdr->e_version);
+
+ if (sizeof(Elf_Ehdr) != pEhdr->e_ehsize)
+- {
+- Log(("RTLdrELF: %s: Elf header e_ehsize is %d expected %d!\n",
+- pszLogName, pEhdr->e_ehsize, sizeof(Elf_Ehdr)));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Elf header e_ehsize is %d expected %d!", pszLogName, pEhdr->e_ehsize, sizeof(Elf_Ehdr));
+ if ( sizeof(Elf_Phdr) != pEhdr->e_phentsize
+- && ( pEhdr->e_phnum != 0
+- || pEhdr->e_type == ET_DYN))
+- {
+- Log(("RTLdrELF: %s: Elf header e_phentsize is %d expected %d!\n",
+- pszLogName, pEhdr->e_phentsize, sizeof(Elf_Phdr)));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ && ( pEhdr->e_phnum != 0
++ || pEhdr->e_type == ET_DYN
++ || pEhdr->e_type == ET_EXEC))
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Elf header e_phentsize is %d expected %d!",
++ pszLogName, pEhdr->e_phentsize, sizeof(Elf_Phdr));
+ if (sizeof(Elf_Shdr) != pEhdr->e_shentsize)
+- {
+- Log(("RTLdrELF: %s: Elf header e_shentsize is %d expected %d!\n",
+- pszLogName, pEhdr->e_shentsize, sizeof(Elf_Shdr)));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Elf header e_shentsize is %d expected %d!",
++ pszLogName, pEhdr->e_shentsize, sizeof(Elf_Shdr));
+
+ switch (pEhdr->e_type)
+ {
+@@ -1638,8 +1884,8 @@ static int RTLDRELF_NAME(ValidateElfHead
+ case ET_DYN:
+ break;
+ default:
+- Log(("RTLdrELF: %s: image type %#x is not supported!\n", pszLogName, pEhdr->e_type));
+- return VERR_BAD_EXE_FORMAT;
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: image type %#x is not supported!",
++ pszLogName, pEhdr->e_type);
+ }
+
+ switch (pEhdr->e_machine)
+@@ -1655,52 +1901,43 @@ static int RTLDRELF_NAME(ValidateElfHead
+ break;
+ #endif
+ default:
+- Log(("RTLdrELF: %s: machine type %u is not supported!\n", pszLogName, pEhdr->e_machine));
+- return VERR_LDRELF_MACHINE;
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRELF_MACHINE,
++ "%s: machine type %u is not supported!", pszLogName, pEhdr->e_machine);
+ }
+
+ if ( pEhdr->e_phoff < pEhdr->e_ehsize
+ && !(pEhdr->e_phoff && pEhdr->e_phnum)
+ && pEhdr->e_phnum)
+- {
+- Log(("RTLdrELF: %s: The program headers overlap with the ELF header! e_phoff=" FMT_ELF_OFF "\n",
+- pszLogName, pEhdr->e_phoff));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: The program headers overlap with the ELF header! e_phoff=" FMT_ELF_OFF,
++ pszLogName, pEhdr->e_phoff);
+ if ( pEhdr->e_phoff + pEhdr->e_phnum * pEhdr->e_phentsize > cbRawImage
+ || pEhdr->e_phoff + pEhdr->e_phnum * pEhdr->e_phentsize < pEhdr->e_phoff)
+- {
+- Log(("RTLdrELF: %s: The program headers extends beyond the file! e_phoff=" FMT_ELF_OFF " e_phnum=" FMT_ELF_HALF "\n",
+- pszLogName, pEhdr->e_phoff, pEhdr->e_phnum));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: The program headers extends beyond the file! e_phoff=" FMT_ELF_OFF " e_phnum=" FMT_ELF_HALF,
++ pszLogName, pEhdr->e_phoff, pEhdr->e_phnum);
+
+
+ if ( pEhdr->e_shoff < pEhdr->e_ehsize
+ && !(pEhdr->e_shoff && pEhdr->e_shnum))
+- {
+- Log(("RTLdrELF: %s: The section headers overlap with the ELF header! e_shoff=" FMT_ELF_OFF "\n",
+- pszLogName, pEhdr->e_shoff));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: The section headers overlap with the ELF header! e_shoff=" FMT_ELF_OFF,
++ pszLogName, pEhdr->e_shoff);
+ if ( pEhdr->e_shoff + pEhdr->e_shnum * pEhdr->e_shentsize > cbRawImage
+ || pEhdr->e_shoff + pEhdr->e_shnum * pEhdr->e_shentsize < pEhdr->e_shoff)
+- {
+- Log(("RTLdrELF: %s: The section headers extends beyond the file! e_shoff=" FMT_ELF_OFF " e_shnum=" FMT_ELF_HALF "\n",
+- pszLogName, pEhdr->e_shoff, pEhdr->e_shnum));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: The section headers extends beyond the file! e_shoff=" FMT_ELF_OFF " e_shnum=" FMT_ELF_HALF,
++ pszLogName, pEhdr->e_shoff, pEhdr->e_shnum);
+
+ if (pEhdr->e_shstrndx == 0 || pEhdr->e_shstrndx > pEhdr->e_shnum)
+- {
+- Log(("RTLdrELF: %s: The section headers string table is out of bounds! e_shstrndx=" FMT_ELF_HALF " e_shnum=" FMT_ELF_HALF "\n",
+- pszLogName, pEhdr->e_shstrndx, pEhdr->e_shnum));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: The section headers string table is out of bounds! e_shstrndx=" FMT_ELF_HALF " e_shnum=" FMT_ELF_HALF,
++ pszLogName, pEhdr->e_shstrndx, pEhdr->e_shnum);
+
+ return VINF_SUCCESS;
+ }
+
++
+ /**
+ * Gets the section header name.
+ *
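
/*
 * A minimal sketch of the overflow-aware bounds check pattern used when validating e_phoff and
 * e_shoff above: the end of the table is rejected both when it runs past the file and when the
 * addition wraps around.  Plain C with simplified names; in the real header check the count and
 * entry size come from 16-bit ELF header fields, so the multiplication itself cannot overflow.
 */
static int sketchTableInBounds(unsigned long long offTable, unsigned long long cEntries,
                               unsigned long long cbEntry, unsigned long long cbFile)
{
    unsigned long long const cbTable = cEntries * cbEntry;
    unsigned long long const offEnd  = offTable + cbTable;
    if (offEnd > cbFile || offEnd < offTable)     /* past EOF, or wrapped around */
        return 0;                                 /* invalid */
    return 1;                                     /* table lies fully within the file */
}
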
+@@ -1741,10 +1978,12 @@ const char *RTLDRELF_NAME(GetSHdrName)(P
+ * @param pModElf Pointer to the module structure.
+ * @param iShdr The index of section header which should be validated.
+ * The section headers are found in the pModElf->paShdrs array.
+- * @param pszLogName The log name.
+ * @param cbRawImage The size of the raw image.
++ * @param pszLogName The log name.
++ * @param pErrInfo Where to return extended error info. Optional.
+ */
+-static int RTLDRELF_NAME(ValidateSectionHeader)(PRTLDRMODELF pModElf, unsigned iShdr, const char *pszLogName, uint64_t cbRawImage)
++static int RTLDRELF_NAME(ValidateSectionHeader)(PRTLDRMODELF pModElf, unsigned iShdr, uint64_t cbRawImage,
++ const char *pszLogName, PRTERRINFO pErrInfo)
+ {
+ const Elf_Shdr *pShdr = &pModElf->paShdrs[iShdr];
+ char szSectionName[80]; NOREF(szSectionName);
+@@ -1776,37 +2015,29 @@ static int RTLDRELF_NAME(ValidateSection
+ || pShdr->sh_link != SHN_UNDEF
+ || pShdr->sh_addralign != 0
+ || pShdr->sh_entsize != 0 )
+- {
+- Log(("RTLdrELF: %s: Bad #0 section: %.*Rhxs\n", pszLogName, sizeof(*pShdr), pShdr ));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Bad #0 section: %.*Rhxs", pszLogName, sizeof(*pShdr), pShdr);
+ return VINF_SUCCESS;
+ }
+
+ if (pShdr->sh_name >= pModElf->cbShStr)
+- {
+- Log(("RTLdrELF: %s: Shdr #%d: sh_name (%d) is beyond the end of the section header string table (%d)!\n",
+- pszLogName, iShdr, pShdr->sh_name, pModElf->cbShStr)); NOREF(pszLogName);
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Shdr #%d: sh_name (%d) is beyond the end of the section header string table (%d)!",
++ pszLogName, iShdr, pShdr->sh_name, pModElf->cbShStr);
+
+ if (pShdr->sh_link >= pModElf->Ehdr.e_shnum)
+- {
+- Log(("RTLdrELF: %s: Shdr #%d: sh_link (%d) is beyond the end of the section table (%d)!\n",
+- pszLogName, iShdr, pShdr->sh_link, pModElf->Ehdr.e_shnum)); NOREF(pszLogName);
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Shdr #%d: sh_link (%d) is beyond the end of the section table (%d)!",
++ pszLogName, iShdr, pShdr->sh_link, pModElf->Ehdr.e_shnum);
+
+ switch (pShdr->sh_type)
+ {
+ /** @todo find specs and check up which sh_info fields indicates section table entries */
+ case 12301230:
+ if (pShdr->sh_info >= pModElf->Ehdr.e_shnum)
+- {
+- Log(("RTLdrELF: %s: Shdr #%d: sh_info (%d) is beyond the end of the section table (%d)!\n",
+- pszLogName, iShdr, pShdr->sh_link, pModElf->Ehdr.e_shnum));
+- return VERR_BAD_EXE_FORMAT;
+- }
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Shdr #%d: sh_info (%d) is beyond the end of the section table (%d)!",
++ pszLogName, iShdr, pShdr->sh_link, pModElf->Ehdr.e_shnum);
+ break;
+
+ case SHT_NULL:
+@@ -1840,18 +2071,740 @@ static int RTLDRELF_NAME(ValidateSection
+ uint64_t offEnd = pShdr->sh_offset + pShdr->sh_size;
+ if ( offEnd > cbRawImage
+ || offEnd < (uint64_t)pShdr->sh_offset)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Shdr #%d: sh_offset (" FMT_ELF_OFF ") + sh_size (" FMT_ELF_XWORD " = %RX64) is beyond the end of the file (%RX64)!",
++ pszLogName, iShdr, pShdr->sh_offset, pShdr->sh_size, offEnd, cbRawImage);
++ if (pShdr->sh_offset < sizeof(Elf_Ehdr))
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Shdr #%d: sh_offset (" FMT_ELF_OFF ") + sh_size (" FMT_ELF_XWORD ") is starting in the ELF header!",
++ pszLogName, iShdr, pShdr->sh_offset, pShdr->sh_size);
++ }
++
++ return VINF_SUCCESS;
++}
++
++
++/**
++ * Process the section headers.
++ *
++ * @returns iprt status code.
++ * @param pModElf Pointer to the module structure.
++ * @param paShdrs The section headers.
++ * @param cbRawImage The size of the raw image.
++ * @param pszLogName The log name.
++ * @param pErrInfo Where to return extended error info. Optional.
++ */
++static int RTLDRELF_NAME(ValidateAndProcessSectionHeaders)(PRTLDRMODELF pModElf, Elf_Shdr *paShdrs, uint64_t cbRawImage,
++ const char *pszLogName, PRTERRINFO pErrInfo)
++{
++ Elf_Addr uNextAddr = 0;
++ for (unsigned i = 0; i < pModElf->Ehdr.e_shnum; i++)
++ {
++ int rc = RTLDRELF_NAME(ValidateSectionHeader)(pModElf, i, cbRawImage, pszLogName, pErrInfo);
++ if (RT_FAILURE(rc))
++ return rc;
++
++ /*
++ * We're looking for symbol tables.
++ */
++ if (paShdrs[i].sh_type == SHT_SYMTAB)
+ {
+- Log(("RTLdrELF: %s: Shdr #%d: sh_offset (" FMT_ELF_OFF ") + sh_size (" FMT_ELF_XWORD " = %RX64) is beyond the end of the file (%RX64)!\n",
+- pszLogName, iShdr, pShdr->sh_offset, pShdr->sh_size, offEnd, cbRawImage));
+- return VERR_BAD_EXE_FORMAT;
++ if (pModElf->Rel.iSymSh != ~0U)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRELF_MULTIPLE_SYMTABS,
++ "%s: Multiple symbol tabs! iSymSh=%d i=%d", pszLogName, pModElf->Rel.iSymSh, i);
++ pModElf->Rel.iSymSh = i;
++ pModElf->Rel.cSyms = (unsigned)(paShdrs[i].sh_size / sizeof(Elf_Sym));
++ AssertBreakStmt(pModElf->Rel.cSyms == paShdrs[i].sh_size / sizeof(Elf_Sym), rc = VERR_IMAGE_TOO_BIG);
++ pModElf->Rel.iStrSh = paShdrs[i].sh_link;
++ pModElf->Rel.cbStr = (unsigned)paShdrs[pModElf->Rel.iStrSh].sh_size;
++ AssertBreakStmt(pModElf->Rel.cbStr == paShdrs[pModElf->Rel.iStrSh].sh_size, rc = VERR_IMAGE_TOO_BIG);
++ }
++ else if (paShdrs[i].sh_type == SHT_DYNSYM)
++ {
++ if (pModElf->Dyn.iSymSh != ~0U)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_LDRELF_MULTIPLE_SYMTABS,
++ "%s: Multiple dynamic symbol tabs! iSymSh=%d i=%d", pszLogName, pModElf->Dyn.iSymSh, i);
++ if (pModElf->Ehdr.e_type != ET_DYN && pModElf->Ehdr.e_type != ET_EXEC)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Unexpected SHT_DYNSYM (i=%d) for e_type=%d", pszLogName, i, pModElf->Ehdr.e_type);
++ pModElf->Dyn.iSymSh = i;
++ pModElf->Dyn.cSyms = (unsigned)(paShdrs[i].sh_size / sizeof(Elf_Sym));
++ AssertBreakStmt(pModElf->Dyn.cSyms == paShdrs[i].sh_size / sizeof(Elf_Sym), rc = VERR_IMAGE_TOO_BIG);
++ pModElf->Dyn.iStrSh = paShdrs[i].sh_link;
++ pModElf->Dyn.cbStr = (unsigned)paShdrs[pModElf->Dyn.iStrSh].sh_size;
++ AssertBreakStmt(pModElf->Dyn.cbStr == paShdrs[pModElf->Dyn.iStrSh].sh_size, rc = VERR_IMAGE_TOO_BIG);
+ }
+- if (pShdr->sh_offset < sizeof(Elf_Ehdr))
++ /*
++     * We also look for the dynamic section.
++ */
++ else if (paShdrs[i].sh_type == SHT_DYNAMIC)
++ {
++ if (pModElf->iShDynamic != ~0U)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Multiple dynamic sections! iShDynamic=%d i=%d",
++ pszLogName, pModElf->iShDynamic, i);
++ if (pModElf->Ehdr.e_type != ET_DYN && pModElf->Ehdr.e_type != ET_EXEC)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Unexpected SHT_DYNAMIC (i=%d) for e_type=%d", pszLogName, i, pModElf->Ehdr.e_type);
++ if (paShdrs[i].sh_entsize != sizeof(Elf_Dyn))
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: SHT_DYNAMIC (i=%d) sh_entsize=" FMT_ELF_XWORD ", expected %#zx",
++ pszLogName, i, paShdrs[i].sh_entsize, sizeof(Elf_Dyn));
++ pModElf->iShDynamic = i;
++ Elf_Xword const cDynamic = paShdrs[i].sh_size / sizeof(Elf_Dyn);
++ if (cDynamic > _64K || cDynamic < 2)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: SHT_DYNAMIC (i=%d) sh_size=" FMT_ELF_XWORD " is out of range (2..64K)",
++ pszLogName, i, paShdrs[i].sh_size);
++ pModElf->cDynamic = (unsigned)cDynamic;
++ }
++
++ /*
++ * Special checks for the section string table.
++ */
++ if (i == pModElf->Ehdr.e_shstrndx)
+ {
+- Log(("RTLdrELF: %s: Shdr #%d: sh_offset (" FMT_ELF_OFF ") + sh_size (" FMT_ELF_XWORD ") is starting in the ELF header!\n",
+- pszLogName, iShdr, pShdr->sh_offset, pShdr->sh_size));
+- return VERR_BAD_EXE_FORMAT;
++ if (paShdrs[i].sh_type != SHT_STRTAB)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Section header string table is not a SHT_STRTAB: %#x",
++ pszLogName, paShdrs[i].sh_type);
++ if (paShdrs[i].sh_size == 0)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Section header string table is empty", pszLogName);
+ }
++
++ /*
++ * Kluge for the .data..percpu segment in 64-bit linux kernels.
++ */
++ if (paShdrs[i].sh_flags & SHF_ALLOC)
++ {
++ if ( paShdrs[i].sh_addr == 0
++ && paShdrs[i].sh_addr < uNextAddr)
++ {
++ Elf_Addr uAddr = RT_ALIGN_T(uNextAddr, paShdrs[i].sh_addralign, Elf_Addr);
++ Log(("RTLdrElf: Out of order section #%d; adjusting sh_addr from " FMT_ELF_ADDR " to " FMT_ELF_ADDR "\n",
++ i, paShdrs[i].sh_addr, uAddr));
++ paShdrs[i].sh_addr = uAddr;
++ }
++ uNextAddr = paShdrs[i].sh_addr + paShdrs[i].sh_size;
++ }
++ } /* for each section header */
++
++ return VINF_SUCCESS;
++}
++
++
++/**
++ * Process the program headers and the dynamic section.
++ *
++ * @returns iprt status code.
++ * @param pModElf Pointer to the module structure.
++ * @param   fFlags       The loader open flags passed in by the caller.
++ * @param cbRawImage The size of the raw image.
++ * @param pszLogName The log name.
++ * @param pErrInfo Where to return extended error info. Optional.
++ */
++static int RTLDRELF_NAME(ValidateAndProcessDynamicInfo)(PRTLDRMODELF pModElf, uint64_t cbRawImage, uint32_t fFlags,
++ const char *pszLogName, PRTERRINFO pErrInfo)
++{
++ /*
++ * Check preconditions.
++ */
++ AssertReturn(pModElf->Ehdr.e_type == ET_DYN || pModElf->Ehdr.e_type == ET_EXEC, VERR_INTERNAL_ERROR_2);
++ if (pModElf->Ehdr.e_phnum <= 1 || pModElf->Ehdr.e_phnum >= _32K)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: e_phnum=%u is out of bounds (2..32K)", pszLogName, pModElf->Ehdr.e_phnum);
++ if (pModElf->iShDynamic == ~0U)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: no .dynamic section", pszLogName);
++ AssertReturn(pModElf->cDynamic > 1 && pModElf->cDynamic <= _64K, VERR_INTERNAL_ERROR_3);
++
++ /* ASSUME that the sections are ordered by address. That simplifies
++ validation code further down. */
++ AssertReturn(pModElf->Ehdr.e_shnum >= 2, VERR_INTERNAL_ERROR_4);
++ Elf_Shdr const *paShdrs = pModElf->paShdrs;
++ Elf_Addr uPrevEnd = paShdrs[1].sh_addr + paShdrs[1].sh_size;
++ for (unsigned i = 2; i < pModElf->Ehdr.e_shnum; i++)
++ if (paShdrs[i].sh_flags & SHF_ALLOC)
++ {
++ if (uPrevEnd > paShdrs[i].sh_addr)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: section %u is out of order: uPrevEnd=" FMT_ELF_ADDR " sh_addr=" FMT_ELF_ADDR,
++ pszLogName, i, uPrevEnd, paShdrs[i].sh_addr);
++ uPrevEnd = paShdrs[i].sh_addr + paShdrs[i].sh_size;
++ }
++
++ /* Must have string and symbol tables. */
++ if (pModElf->Dyn.iStrSh == ~0U)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: No dynamic string table section", pszLogName);
++ if (pModElf->Dyn.iSymSh == ~0U)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: No dynamic symbol table section", pszLogName);
++
++ /*
++ * Load the program headers.
++ */
++ size_t const cbPhdrs = sizeof(pModElf->paPhdrs[0]) * pModElf->Ehdr.e_phnum;
++ Elf_Phdr *paPhdrs = (Elf_Phdr *)RTMemAllocZ(cbPhdrs);
++ pModElf->paPhdrs = paPhdrs;
++ AssertReturn(paPhdrs, VERR_NO_MEMORY);
++
++ int rc = pModElf->Core.pReader->pfnRead(pModElf->Core.pReader, paPhdrs, cbPhdrs, pModElf->Ehdr.e_phoff);
++ if (RT_FAILURE(rc))
++ return RTERRINFO_LOG_SET_F(pErrInfo, rc, "%s: pfnRead(,,%#zx, " FMT_ELF_OFF ") -> %Rrc",
++ pszLogName, cbPhdrs, pModElf->Ehdr.e_phoff, rc);
++
++ /*
++ * Validate them.
++ */
++ unsigned cbPage = _4K; /** @todo generalize architecture specific stuff using its own code template header. */
++ switch (pModElf->Core.enmArch)
++ {
++ case RTLDRARCH_AMD64:
++ case RTLDRARCH_X86_32:
++ break;
++ default:
++ AssertFailedBreak(/** @todo page size for got.plt hacks */);
+ }
++ unsigned iLoad = 0;
++ unsigned iLoadShdr = 1; /* ASSUMES ordered (checked above). */
++ unsigned cDynamic = 0;
++ Elf_Addr cbImage = 0;
++ Elf_Addr uLinkAddress = ~(Elf_Addr)0;
++ for (unsigned i = 0; i < pModElf->Ehdr.e_phnum; i++)
++ {
++ const Elf_Phdr * const pPhdr = &paPhdrs[i];
++ Log3(("RTLdrELF: Program Header #%d:\n"
++ "RTLdrELF: p_type: " FMT_ELF_WORD " (%s)\n"
++ "RTLdrELF: p_flags: " FMT_ELF_WORD "\n"
++ "RTLdrELF: p_offset: " FMT_ELF_OFF "\n"
++ "RTLdrELF: p_vaddr: " FMT_ELF_ADDR "\n"
++ "RTLdrELF: p_paddr: " FMT_ELF_ADDR "\n"
++ "RTLdrELF: p_filesz: " FMT_ELF_XWORD "\n"
++ "RTLdrELF: p_memsz: " FMT_ELF_XWORD "\n"
++ "RTLdrELF: p_align: " FMT_ELF_XWORD "\n",
++ i,
++ pPhdr->p_type, rtldrElfGetPhdrType(pPhdr->p_type), pPhdr->p_flags, pPhdr->p_offset,
++ pPhdr->p_vaddr, pPhdr->p_paddr, pPhdr->p_filesz, pPhdr->p_memsz, pPhdr->p_align));
++
++ if (pPhdr->p_type == DT_NULL)
++ continue;
++
++ if ( pPhdr->p_filesz != 0
++ && ( pPhdr->p_offset >= cbRawImage
++ || pPhdr->p_filesz > cbRawImage
++ || pPhdr->p_offset + pPhdr->p_filesz > cbRawImage))
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Prog Hdr #%u: bogus p_offset=" FMT_ELF_OFF " & p_filesz=" FMT_ELF_XWORD " (file size %#RX64)",
++ pszLogName, i, pPhdr->p_offset, pPhdr->p_filesz, cbRawImage);
++
++ if (pPhdr->p_flags & ~(Elf64_Word)(PF_X | PF_R | PF_W))
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Prog Hdr #%u: bogus p_flags=" FMT_ELF_WORD,
++ pszLogName, i, pPhdr->p_flags);
++
++ if (!RT_IS_POWER_OF_TWO(pPhdr->p_align))
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Prog Hdr #%u: bogus p_align=" FMT_ELF_XWORD,
++ pszLogName, i, pPhdr->p_align);
++
++ if ( pPhdr->p_align > 1
++ && pPhdr->p_memsz > 0
++ && pPhdr->p_filesz > 0
++ && (pPhdr->p_offset & (pPhdr->p_align - 1)) != (pPhdr->p_vaddr & (pPhdr->p_align - 1)))
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Prog Hdr #%u: misaligned p_offset=" FMT_ELF_OFF " p_vaddr=" FMT_ELF_ADDR " p_align=" FMT_ELF_XWORD,
++ pszLogName, i, pPhdr->p_offset, pPhdr->p_vaddr, pPhdr->p_align);
++
++        /* Do some type specific checks: */
++ switch (pPhdr->p_type)
++ {
++ case PT_LOAD:
++ {
++ if (pPhdr->p_memsz < pPhdr->p_filesz)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Prog Hdr #%u/LOAD#%u: bogus p_memsz=" FMT_ELF_XWORD " or p_filesz=" FMT_ELF_XWORD,
++ pszLogName, i, iLoad, pPhdr->p_memsz, pPhdr->p_filesz);
++ cbImage = pPhdr->p_vaddr + pPhdr->p_memsz;
++ if (iLoad == 0)
++ uLinkAddress = pPhdr->p_vaddr;
++
++ /* Find the corresponding sections, checking their addresses and
++ file offsets since the rest of the code is still section based
++ rather than using program headers as it should... */
++ Elf_Off off = pPhdr->p_offset;
++ Elf_Addr uAddr = pPhdr->p_vaddr;
++ Elf_Xword cbMem = pPhdr->p_memsz;
++ Elf_Xword cbFile = pPhdr->p_filesz;
++ while (cbMem > 0)
++ {
++ if (iLoadShdr < pModElf->Ehdr.e_shnum)
++ { /* likely */ }
++ else if (iLoadShdr == pModElf->Ehdr.e_shnum)
++ {
++ /** @todo anything else to check here? */
++ iLoadShdr++;
++ break;
++ }
++ else
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Prog Hdr #%u/LOAD#%u: Out of sections at " FMT_ELF_ADDR " LB " FMT_ELF_XWORD,
++ pszLogName, i, iLoad, uAddr, cbMem);
++ if (!(paShdrs[iLoadShdr].sh_flags & SHF_ALLOC))
++ {
++ if ( paShdrs[iLoadShdr].sh_type != SHT_NOBITS
++ && paShdrs[iLoadShdr].sh_size > 0
++ && off < paShdrs[iLoadShdr].sh_offset + paShdrs[iLoadShdr].sh_size
++ && paShdrs[iLoadShdr].sh_offset < off + cbMem)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Prog Hdr #%u/LOAD#%u: Overlaps with !SHF_ALLOC section at " FMT_ELF_OFF " LB " FMT_ELF_XWORD,
++ pszLogName, i, iLoad, paShdrs[iLoadShdr].sh_offset, paShdrs[iLoadShdr].sh_size);
++ pModElf->paShdrExtras[iLoadShdr].idxPhdr = UINT16_MAX;
++ iLoadShdr++;
++ continue;
++ }
++
++ if (uAddr != paShdrs[iLoadShdr].sh_addr)
++ {
++ /* Before the first section we expect headers to be loaded, so
++ that the file is simply mapped from file offset zero. */
++ if ( iLoadShdr == 1
++ && iLoad == 0
++ && paShdrs[1].sh_addr == paShdrs[1].sh_offset
++ && cbFile >= paShdrs[1].sh_offset
++ && cbMem >= paShdrs[1].sh_offset)
++ {
++ /* Modify paShdrs[0] to describe the gap. ".elf.headers" */
++ pModElf->iFirstSect = 0;
++ pModElf->paShdrs[0].sh_name = 0;
++ pModElf->paShdrs[0].sh_type = SHT_PROGBITS;
++ pModElf->paShdrs[0].sh_flags = SHF_ALLOC
++ | (pPhdr->p_flags & PF_W ? SHF_WRITE : 0)
++ | (pPhdr->p_flags & PF_X ? SHF_EXECINSTR : 0);
++ pModElf->paShdrs[0].sh_addr = uAddr;
++ pModElf->paShdrs[0].sh_offset = off;
++ pModElf->paShdrs[0].sh_size = paShdrs[1].sh_offset;
++ pModElf->paShdrs[0].sh_link = 0;
++ pModElf->paShdrs[0].sh_info = 0;
++ pModElf->paShdrs[0].sh_addralign = pPhdr->p_align;
++ pModElf->paShdrs[0].sh_entsize = 0;
++ *(Elf_Shdr *)pModElf->paOrgShdrs = pModElf->paShdrs[0]; /* (necessary for segment enumeration) */
++
++ uAddr += paShdrs[1].sh_offset;
++ cbMem -= paShdrs[1].sh_offset;
++ cbFile -= paShdrs[1].sh_offset;
++ off = paShdrs[1].sh_offset;
++ }
++ /* Alignment padding? Allow up to a page size. */
++ else if ( paShdrs[iLoadShdr].sh_addr > uAddr
++ && paShdrs[iLoadShdr].sh_addr - uAddr
++ < RT_MAX(paShdrs[iLoadShdr].sh_addralign, cbPage /*got.plt hack*/))
++ {
++ Elf_Xword cbAlignPadding = paShdrs[iLoadShdr].sh_addr - uAddr;
++ if (cbAlignPadding >= cbMem)
++ break;
++ cbMem -= cbAlignPadding;
++ uAddr += cbAlignPadding;
++ if (cbFile > cbAlignPadding)
++ {
++ off += cbAlignPadding;
++ cbFile -= cbAlignPadding;
++ }
++ else
++ {
++ off += cbFile;
++ cbFile = 0;
++ }
++ }
++ }
++
++ if ( uAddr == paShdrs[iLoadShdr].sh_addr
++ && cbMem >= paShdrs[iLoadShdr].sh_size
++ && ( paShdrs[iLoadShdr].sh_type != SHT_NOBITS
++ ? off == paShdrs[iLoadShdr].sh_offset
++ && cbFile >= paShdrs[iLoadShdr].sh_size /* this might be too strict... */
++ : cbFile == 0) )
++ {
++ if (paShdrs[iLoadShdr].sh_type != SHT_NOBITS)
++ {
++ off += paShdrs[iLoadShdr].sh_size;
++ cbFile -= paShdrs[iLoadShdr].sh_size;
++ }
++ uAddr += paShdrs[iLoadShdr].sh_size;
++ cbMem -= paShdrs[iLoadShdr].sh_size;
++ }
++ else
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Prog Hdr #%u/LOAD#%u: Mismatch at " FMT_ELF_ADDR " LB " FMT_ELF_XWORD " (file " FMT_ELF_OFF " LB " FMT_ELF_XWORD ") with section #%u " FMT_ELF_ADDR " LB " FMT_ELF_XWORD " (file " FMT_ELF_OFF " sh_type=" FMT_ELF_WORD ")",
++ pszLogName, i, iLoad, uAddr, cbMem, off, cbFile,
++ iLoadShdr, paShdrs[iLoadShdr].sh_addr, paShdrs[iLoadShdr].sh_size,
++ paShdrs[iLoadShdr].sh_offset, paShdrs[iLoadShdr].sh_type);
++
++ pModElf->paShdrExtras[iLoadShdr].idxPhdr = iLoad;
++ iLoadShdr++;
++ } /* section loop */
++
++ iLoad++;
++ break;
++ }
++
++ case PT_DYNAMIC:
++ {
++ const Elf_Shdr *pShdr = &pModElf->paShdrs[pModElf->iShDynamic];
++ if (pPhdr->p_offset != pShdr->sh_offset)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Prog Hdr #%u/DYNAMIC: p_offset=" FMT_ELF_OFF " expected " FMT_ELF_OFF,
++ pszLogName, i, pPhdr->p_offset, pShdr->sh_offset);
++ if (RT_MAX(pPhdr->p_memsz, pPhdr->p_filesz) != pShdr->sh_size)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: Prog Hdr #%u/DYNAMIC: expected " FMT_ELF_XWORD " for RT_MAX(p_memsz=" FMT_ELF_XWORD ", p_filesz=" FMT_ELF_XWORD ")",
++ pszLogName, i, pShdr->sh_size, pPhdr->p_memsz, pPhdr->p_filesz);
++ cDynamic++;
++ break;
++ }
++ }
++ }
++
++ if (iLoad == 0)
++ return RTERRINFO_LOG_SET_F(pErrInfo, rc, "%s: No PT_LOAD program headers", pszLogName);
++ if (cDynamic != 1)
++ return RTERRINFO_LOG_SET_F(pErrInfo, rc, "%s: No program header for the DYNAMIC section", pszLogName);
++
++ cbImage -= uLinkAddress;
++ pModElf->cbImage = (uint64_t)cbImage;
++ pModElf->LinkAddress = uLinkAddress;
++ AssertReturn(pModElf->cbImage == cbImage, VERR_INTERNAL_ERROR_5);
++ Log3(("RTLdrELF: LinkAddress=" FMT_ELF_ADDR " cbImage=" FMT_ELF_ADDR " (from PT_LOAD)\n", uLinkAddress, cbImage));
++
++ for (; iLoadShdr < pModElf->Ehdr.e_shnum; iLoadShdr++)
++ if ( !(paShdrs[iLoadShdr].sh_flags & SHF_ALLOC)
++ || paShdrs[iLoadShdr].sh_size == 0)
++ pModElf->paShdrExtras[iLoadShdr].idxPhdr = UINT16_MAX;
++ else
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: No PT_LOAD for section #%u " FMT_ELF_ADDR " LB " FMT_ELF_XWORD " (file " FMT_ELF_OFF " sh_type=" FMT_ELF_WORD ")",
++ pszLogName, iLoadShdr, paShdrs[iLoadShdr].sh_addr, paShdrs[iLoadShdr].sh_size,
++ paShdrs[iLoadShdr].sh_offset, paShdrs[iLoadShdr].sh_type);
++
++ /*
++ * Load and validate the dynamic table. We have got / will get most of the
++ * info we need from the section table, so we must make sure this matches up.
++ */
++ Log3(("RTLdrELF: Dynamic section - %u entries\n", pModElf->cDynamic));
++ size_t const cbDynamic = pModElf->cDynamic * sizeof(pModElf->paDynamic[0]);
++ Elf_Dyn * const paDynamic = (Elf_Dyn *)RTMemAlloc(cbDynamic);
++ AssertReturn(paDynamic, VERR_NO_MEMORY);
++ pModElf->paDynamic = paDynamic;
++
++ rc = pModElf->Core.pReader->pfnRead(pModElf->Core.pReader, paDynamic, cbDynamic, paShdrs[pModElf->iShDynamic].sh_offset);
++ if (RT_FAILURE(rc))
++ return RTERRINFO_LOG_SET_F(pErrInfo, rc, "%s: pfnRead(,,%#zx, " FMT_ELF_OFF ") -> %Rrc",
++ pszLogName, cbDynamic, paShdrs[pModElf->iShDynamic].sh_offset, rc);
++
++ for (uint32_t i = 0; i < pModElf->cDynamic; i++)
++ {
++#define LOG_VALIDATE_PTR_RET(szName) do { \
++ Log3(("RTLdrELF: DT[%u]: %16s " FMT_ELF_ADDR "\n", i, szName, paDynamic[i].d_un.d_ptr)); \
++ if ((uint64_t)paDynamic[i].d_un.d_ptr - uLinkAddress < cbImage) { /* likely */ } \
++ else return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT[%u]/" szName ": Invalid address " FMT_ELF_ADDR " (valid range: " FMT_ELF_ADDR " LB " FMT_ELF_ADDR ")", \
++ pszLogName, i, paDynamic[i].d_un.d_ptr, uLinkAddress, cbImage); \
++ } while (0)
++#define LOG_VALIDATE_PTR_VAL_RET(szName, uExpected) do { \
++ Log3(("RTLdrELF: DT[%u]: %16s " FMT_ELF_ADDR "\n", i, szName, (uint64_t)paDynamic[i].d_un.d_ptr)); \
++ if (paDynamic[i].d_un.d_ptr == (Elf_Addr)(uExpected)) { /* likely */ } \
++ else return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT[%u]/" szName ": " FMT_ELF_ADDR ", expected " FMT_ELF_ADDR, \
++ pszLogName, i, paDynamic[i].d_un.d_ptr, (Elf_Addr)(uExpected)); \
++ } while (0)
++#define LOG_VALIDATE_STR_RET(szName) do { \
++ Log3(("RTLdrELF: DT[%u]: %16s %#RX64\n", i, szName, (uint64_t)paDynamic[i].d_un.d_val)); \
++ if ((uint64_t)paDynamic[i].d_un.d_val < pModElf->Dyn.cbStr) { /* likely */ } \
++ else return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT[%u]/" szName ": Invalid string table offset %#RX64 (max %#x)", \
++ pszLogName, i, (uint64_t)paDynamic[i].d_un.d_val, pModElf->Dyn.cbStr); \
++ } while (0)
++#define LOG_VALIDATE_VAL_RET(szName, uExpected) do { \
++ Log3(("RTLdrELF: DT[%u]: %16s %#RX64\n", i, szName, (uint64_t)paDynamic[i].d_un.d_val)); \
++ if ((uint64_t)paDynamic[i].d_un.d_val == (uint64_t)(uExpected)) { /* likely */ } \
++ else return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT[%u]/" szName ": %#RX64, expected %#RX64", \
++ pszLogName, i, (uint64_t)paDynamic[i].d_un.d_val, (uint64_t)(uExpected)); \
++ } while (0)
++#define SET_RELOC_TYPE_RET(a_szName, a_uType) do { \
++ if (pModElf->DynInfo.uRelocType == 0 || pModElf->DynInfo.uRelocType == (a_uType)) \
++ pModElf->DynInfo.uRelocType = (a_uType); \
++ else return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT[%u]/" a_szName ": Mixing DT_RELA and DT_REL", pszLogName, i); \
++ } while (0)
++#define SET_INFO_FIELD_RET(a_szName, a_Field, a_Value, a_UnsetValue, a_szFmt) do { \
++ if ((a_Field) == (a_UnsetValue) && (a_Value) != (a_UnsetValue)) \
++ (a_Field) = (a_Value); /* likely */ \
++ else if ((a_Field) != (a_UnsetValue)) \
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT[%u]/" a_szName ": Multiple entries (first value " a_szFmt ", second " a_szFmt ")", pszLogName, i, (a_Field), (a_Value)); \
++ else if ((a_Value) != (a_UnsetValue)) \
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT[%u]/" a_szName ": Unexpected value " a_szFmt, pszLogName, i, (a_Value)); \
++ } while (0)
++#define FIND_MATCHING_SECTION_RET(a_szName, a_ExtraMatchExpr, a_idxShFieldToSet) do { \
++ unsigned iSh; \
++ for (iSh = 1; iSh < pModElf->Ehdr.e_shnum; iSh++) \
++ if ( paShdrs[iSh].sh_addr == paDynamic[i].d_un.d_ptr \
++ && (a_ExtraMatchExpr)) \
++ { \
++ (a_idxShFieldToSet) = iSh; \
++ if (pModElf->paShdrExtras[iSh].idxDt != UINT16_MAX) \
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, \
++ "%s: DT[%u]/" a_szName ": section #%u (" FMT_ELF_ADDR ") already referenced by DT[%u]", \
++ pszLogName, i, iSh, paShdrs[iSh].sh_addr, pModElf->paShdrExtras[iSh].idxDt); \
++ pModElf->paShdrExtras[iSh].idxDt = i; \
++ pModElf->paShdrExtras[iSh].uDtTag = (uint32_t)paDynamic[i].d_tag; \
++ break; \
++ } \
++ if (iSh < pModElf->Ehdr.e_shnum) { /* likely */ } \
++ else return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT[%u]/" a_szName ": No matching section for " FMT_ELF_ADDR, pszLogName, i, paDynamic[i].d_un.d_ptr); \
++ } while (0)
++#define ONLY_FOR_DEBUG_OR_VALIDATION_RET(a_szName) do { \
++ if (fFlags & (RTLDR_O_FOR_DEBUG | RTLDR_O_FOR_VALIDATION)) { /* likely */ } \
++ else return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT[%u]/" a_szName ": Not supported (" FMT_ELF_ADDR ")", pszLogName, i, paDynamic[i].d_un.d_ptr); \
++ } while (0)
++#define LOG_NON_VALUE_ENTRY(a_szName) Log3(("RTLdrELF: DT[%u]: %16s (%#RX64)\n", i, a_szName, (uint64_t)paDynamic[i].d_un.d_val))
++
++ switch (paDynamic[i].d_tag)
++ {
++ case DT_NULL:
++ LOG_NON_VALUE_ENTRY("DT_NULL");
++ for (unsigned iNull = i + 1; iNull < pModElf->cDynamic; iNull++)
++                    if (paDynamic[iNull].d_tag == DT_NULL) /* Not technically a bug, but let's try being extremely strict for now */
++ LOG_NON_VALUE_ENTRY("DT_NULL");
++ else if (!(fFlags & (RTLDR_O_FOR_DEBUG | RTLDR_O_FOR_VALIDATION)))
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: DT[%u]/DT_NULL: Dynamic section isn't zero padded (extra #%u of #%u)",
++ pszLogName, i, iNull - i, pModElf->cDynamic - i);
++ i = pModElf->cDynamic;
++ break;
++ case DT_NEEDED:
++ LOG_VALIDATE_STR_RET("DT_NEEDED");
++ break;
++ case DT_PLTRELSZ:
++ Log3(("RTLdrELF: DT[%u]: %16s %#RX64 bytes\n", i, "DT_PLTRELSZ", (uint64_t)paDynamic[i].d_un.d_val));
++ SET_INFO_FIELD_RET("DT_PLTRELSZ", pModElf->DynInfo.cbJmpRelocs, (Elf_Xword)paDynamic[i].d_un.d_val, 0, FMT_ELF_XWORD);
++ break;
++ case DT_PLTGOT:
++ LOG_VALIDATE_PTR_RET("DT_PLTGOT");
++ break;
++ case DT_HASH:
++ LOG_VALIDATE_PTR_RET("DT_HASH");
++ break;
++ case DT_STRTAB:
++ LOG_VALIDATE_PTR_VAL_RET("DT_STRTAB", paShdrs[pModElf->Dyn.iStrSh].sh_addr);
++ pModElf->paShdrExtras[pModElf->Dyn.iStrSh].idxDt = i;
++                pModElf->paShdrExtras[pModElf->Dyn.iStrSh].uDtTag = DT_STRTAB;
++ break;
++ case DT_SYMTAB:
++ LOG_VALIDATE_PTR_VAL_RET("DT_SYMTAB", paShdrs[pModElf->Dyn.iSymSh].sh_addr);
++ pModElf->paShdrExtras[pModElf->Dyn.iSymSh].idxDt = i;
++ pModElf->paShdrExtras[pModElf->Dyn.iSymSh].uDtTag = DT_SYMTAB;
++ break;
++ case DT_RELA:
++ LOG_VALIDATE_PTR_RET("DT_RELA");
++ SET_RELOC_TYPE_RET("DT_RELA", DT_RELA);
++ SET_INFO_FIELD_RET("DT_RELA", pModElf->DynInfo.uPtrRelocs, paDynamic[i].d_un.d_ptr, ~(Elf_Addr)0, FMT_ELF_ADDR);
++ FIND_MATCHING_SECTION_RET("DT_RELA", paShdrs[iSh].sh_type == SHT_RELA, pModElf->DynInfo.idxShRelocs);
++ break;
++ case DT_RELASZ:
++ Log3(("RTLdrELF: DT[%u]: %16s %#RX64 bytes\n", i, "DT_RELASZ", (uint64_t)paDynamic[i].d_un.d_val));
++ SET_RELOC_TYPE_RET("DT_RELASZ", DT_RELA);
++ SET_INFO_FIELD_RET("DT_RELASZ", pModElf->DynInfo.cbRelocs, (Elf_Xword)paDynamic[i].d_un.d_val, 0, FMT_ELF_XWORD);
++ break;
++ case DT_RELAENT:
++ LOG_VALIDATE_VAL_RET("DT_RELAENT", sizeof(Elf_Rela));
++ SET_RELOC_TYPE_RET("DT_RELAENT", DT_RELA);
++ SET_INFO_FIELD_RET("DT_RELAENT", pModElf->DynInfo.cbRelocEntry, (unsigned)sizeof(Elf_Rela), 0, "%u");
++ break;
++ case DT_STRSZ:
++ LOG_VALIDATE_VAL_RET("DT_STRSZ", pModElf->Dyn.cbStr);
++ break;
++ case DT_SYMENT:
++ LOG_VALIDATE_VAL_RET("DT_SYMENT", sizeof(Elf_Sym));
++ break;
++ case DT_INIT:
++ LOG_VALIDATE_PTR_RET("DT_INIT");
++ ONLY_FOR_DEBUG_OR_VALIDATION_RET("DT_INIT");
++ break;
++ case DT_FINI:
++ LOG_VALIDATE_PTR_RET("DT_FINI");
++ ONLY_FOR_DEBUG_OR_VALIDATION_RET("DT_FINI");
++ break;
++ case DT_SONAME:
++ LOG_VALIDATE_STR_RET("DT_SONAME");
++ break;
++ case DT_RPATH:
++ LOG_VALIDATE_STR_RET("DT_RPATH");
++ break;
++ case DT_SYMBOLIC:
++ LOG_NON_VALUE_ENTRY("DT_SYMBOLIC");
++ break;
++ case DT_REL:
++ LOG_VALIDATE_PTR_RET("DT_REL");
++ SET_RELOC_TYPE_RET("DT_REL", DT_REL);
++ SET_INFO_FIELD_RET("DT_REL", pModElf->DynInfo.uPtrRelocs, paDynamic[i].d_un.d_ptr, ~(Elf_Addr)0, FMT_ELF_ADDR);
++ FIND_MATCHING_SECTION_RET("DT_REL", paShdrs[iSh].sh_type == SHT_REL, pModElf->DynInfo.idxShRelocs);
++ break;
++ case DT_RELSZ:
++ Log3(("RTLdrELF: DT[%u]: %16s %#RX64 bytes\n", i, "DT_RELSZ", (uint64_t)paDynamic[i].d_un.d_val));
++ SET_RELOC_TYPE_RET("DT_RELSZ", DT_REL);
++ SET_INFO_FIELD_RET("DT_RELSZ", pModElf->DynInfo.cbRelocs, (Elf_Xword)paDynamic[i].d_un.d_val, 0, FMT_ELF_XWORD);
++ break;
++ case DT_RELENT:
++ LOG_VALIDATE_VAL_RET("DT_RELENT", sizeof(Elf_Rel));
++ SET_RELOC_TYPE_RET("DT_RELENT", DT_REL);
++ SET_INFO_FIELD_RET("DT_RELENT", pModElf->DynInfo.cbRelocEntry, (unsigned)sizeof(Elf_Rel), 0, "%u");
++ break;
++ case DT_PLTREL:
++ if (paDynamic[i].d_un.d_val != DT_RELA && paDynamic[i].d_un.d_val != DT_REL)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT[%u]/DT_PLTREL: Invalid value %#RX64",
++ pszLogName, i, (uint64_t)paDynamic[i].d_un.d_val);
++ Log3(("RTLdrELF: DT[%u]: %16s DT_REL%s\n", i, "DT_PLTREL", paDynamic[i].d_un.d_val == DT_RELA ? "A" : ""));
++ SET_INFO_FIELD_RET("DT_PLTREL", pModElf->DynInfo.uJmpRelocType, (unsigned)paDynamic[i].d_un.d_val, 0, "%u");
++ break;
++ case DT_DEBUG:
++ LOG_VALIDATE_PTR_RET("DT_DEBUG");
++ break;
++ case DT_TEXTREL:
++ LOG_NON_VALUE_ENTRY("DT_TEXTREL");
++ break;
++ case DT_JMPREL:
++ LOG_VALIDATE_PTR_RET("DT_JMPREL");
++ SET_INFO_FIELD_RET("DT_JMPREL", pModElf->DynInfo.uPtrJmpRelocs, paDynamic[i].d_un.d_ptr, ~(Elf_Addr)0, FMT_ELF_ADDR);
++ FIND_MATCHING_SECTION_RET("DT_JMPREL", 1, pModElf->DynInfo.idxShJmpRelocs);
++ break;
++ case DT_BIND_NOW:
++ LOG_NON_VALUE_ENTRY("DT_BIND_NOW");
++ break;
++ case DT_INIT_ARRAY:
++ LOG_VALIDATE_PTR_RET("DT_INIT_ARRAY");
++ ONLY_FOR_DEBUG_OR_VALIDATION_RET("DT_INIT_ARRAY");
++ break;
++ case DT_FINI_ARRAY:
++ LOG_VALIDATE_PTR_RET("DT_FINI_ARRAY");
++ ONLY_FOR_DEBUG_OR_VALIDATION_RET("DT_FINI_ARRAY");
++ break;
++ case DT_INIT_ARRAYSZ:
++ Log3(("RTLdrELF: DT[%u]: %16s %#RX64 bytes\n", i, "DT_INIT_ARRAYSZ", (uint64_t)paDynamic[i].d_un.d_val));
++ ONLY_FOR_DEBUG_OR_VALIDATION_RET("DT_INIT_ARRAYSZ");
++ break;
++ case DT_FINI_ARRAYSZ:
++ Log3(("RTLdrELF: DT[%u]: %16s %#RX64 bytes\n", i, "DT_FINI_ARRAYSZ", (uint64_t)paDynamic[i].d_un.d_val));
++ ONLY_FOR_DEBUG_OR_VALIDATION_RET("DT_FINI_ARRAYSZ");
++ break;
++ case DT_RUNPATH:
++ LOG_VALIDATE_STR_RET("DT_RUNPATH");
++ break;
++ case DT_FLAGS:
++ Log3(("RTLdrELF: DT[%u]: %16s %#RX64\n", i, "DT_FLAGS", (uint64_t)paDynamic[i].d_un.d_val));
++ break;
++ case DT_PREINIT_ARRAY:
++ LOG_VALIDATE_PTR_RET("DT_PREINIT_ARRAY");
++ ONLY_FOR_DEBUG_OR_VALIDATION_RET("DT_PREINIT_ARRAY");
++ break;
++ case DT_PREINIT_ARRAYSZ:
++ Log3(("RTLdrELF: DT[%u]: %16s %#RX64 bytes\n", i, "DT_PREINIT_ARRAYSZ", (uint64_t)paDynamic[i].d_un.d_val));
++ ONLY_FOR_DEBUG_OR_VALIDATION_RET("DT_PREINIT_ARRAYSZ");
++ break;
++ default:
++                if (   paDynamic[i].d_tag < DT_ENCODING
++                    || (paDynamic[i].d_tag & 1))
++                    Log3(("RTLdrELF: DT[%u]: %#010RX64 %#RX64%s\n", i, (uint64_t)paDynamic[i].d_tag,
++                          (uint64_t)paDynamic[i].d_un.d_val, paDynamic[i].d_tag >= DT_ENCODING ? " (val)" : ""));
++ else
++ {
++ Log3(("RTLdrELF: DT[%u]: %#010RX64 " FMT_ELF_ADDR " (addr)\n",
++ i, (uint64_t)paDynamic[i].d_tag, paDynamic[i].d_un.d_ptr));
++ if ((uint64_t)paDynamic[i].d_un.d_ptr - uLinkAddress >= cbImage)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: DT[%u]/%#RX64: Invalid address " FMT_ELF_ADDR " (valid range: " FMT_ELF_ADDR " LB " FMT_ELF_ADDR ")",
++ pszLogName, i, (uint64_t)paDynamic[i].d_tag,
++ paDynamic[i].d_un.d_ptr, uLinkAddress, cbImage);
++ }
++ break;
++ }
++#undef LOG_VALIDATE_VAL_RET
++#undef LOG_VALIDATE_STR_RET
++#undef LOG_VALIDATE_PTR_VAL_RET
++#undef LOG_VALIDATE_PTR_RET
++#undef SET_RELOC_TYPE_RET
++#undef SET_INFO_FIELD_RET
++#undef FIND_MATCHING_SECTION_RET
++#undef ONLY_FOR_DEBUG_OR_VALIDATION_RET
++ }
++
++ /*
++ * Validate the relocation information we've gathered.
++ */
++ Elf_Word uShTypeArch = SHT_RELA; /** @todo generalize architecture specific stuff using its own code template header. */
++ switch (pModElf->Core.enmArch)
++ {
++ case RTLDRARCH_AMD64:
++ break;
++ case RTLDRARCH_X86_32:
++ uShTypeArch = SHT_REL;
++ break;
++ default:
++ AssertFailedBreak(/** @todo page size for got.plt hacks */);
++
++ }
++
++ if (pModElf->DynInfo.uRelocType != 0)
++ {
++ const char * const pszModifier = pModElf->DynInfo.uRelocType == DT_RELA ? "A" : "";
++ if (pModElf->DynInfo.uPtrRelocs == ~(Elf_Addr)0)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Missing DT_REL%s", pszLogName, pszModifier);
++ if (pModElf->DynInfo.cbRelocs == 0)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Missing DT_REL%sSZ", pszLogName, pszModifier);
++ if (pModElf->DynInfo.cbRelocEntry == 0)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Missing DT_REL%sENT", pszLogName, pszModifier);
++ Elf_Shdr const *pShdrRelocs = &paShdrs[pModElf->DynInfo.idxShRelocs];
++        Elf_Word const uShType = pModElf->DynInfo.uRelocType == DT_RELA ? SHT_RELA : SHT_REL;
++ if (pShdrRelocs->sh_type != uShType)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT_REL%s* does not match section type: %u vs %u",
++ pszLogName, pszModifier, pShdrRelocs->sh_type, uShType);
++ if (pShdrRelocs->sh_size != pModElf->DynInfo.cbRelocs)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT_REL%sSZ does not match section size: %u vs %u",
++ pszLogName, pszModifier, pShdrRelocs->sh_size, pModElf->DynInfo.cbRelocs);
++ if (uShType != uShTypeArch)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT_REL%s* does not match architecture: %u, arch wants %u",
++ pszLogName, pszModifier, uShType, uShTypeArch);
++ }
++
++ if ( pModElf->DynInfo.uPtrJmpRelocs != ~(Elf_Addr)0
++ || pModElf->DynInfo.cbJmpRelocs != 0
++ || pModElf->DynInfo.uJmpRelocType != 0)
++ {
++ if (pModElf->DynInfo.uPtrJmpRelocs == ~(Elf_Addr)0)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Missing DT_JMPREL", pszLogName);
++ if (pModElf->DynInfo.cbJmpRelocs == 0)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Missing DT_PLTRELSZ", pszLogName);
++ if (pModElf->DynInfo.uJmpRelocType == 0)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: Missing DT_PLTREL", pszLogName);
++ Elf_Shdr const *pShdrRelocs = &paShdrs[pModElf->DynInfo.idxShJmpRelocs];
++ Elf_Word const uShType = pModElf->DynInfo.uJmpRelocType == DT_RELA ? SHT_RELA : SHT_REL;
++ if (pShdrRelocs->sh_type != uShType)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT_PLTREL does not match section type: %u vs %u",
++ pszLogName, pShdrRelocs->sh_type, uShType);
++ if (pShdrRelocs->sh_size != pModElf->DynInfo.cbJmpRelocs)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT_PLTRELSZ does not match section size: %u vs %u",
++ pszLogName, pShdrRelocs->sh_size, pModElf->DynInfo.cbJmpRelocs);
++ if (uShType != uShTypeArch)
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT, "%s: DT_PLTREL does not match architecture: %u, arch wants %u",
++ pszLogName, uShType, uShTypeArch);
++ }
++
++ /*
++ * Check that there aren't any other relocations hiding in the section table.
++ */
++ for (uint32_t i = 1; i < pModElf->Ehdr.e_shnum; i++)
++ if ( (paShdrs[i].sh_type == SHT_REL || paShdrs[i].sh_type == SHT_RELA)
++ && pModElf->paShdrExtras[i].uDtTag != DT_REL
++ && pModElf->paShdrExtras[i].uDtTag != DT_RELA
++ && pModElf->paShdrExtras[i].uDtTag != DT_JMPREL)
++ {
++ char szSecHdrNm[80];
++ return RTERRINFO_LOG_SET_F(pErrInfo, VERR_BAD_EXE_FORMAT,
++ "%s: section header #%u (%s type=" FMT_ELF_WORD " size=" FMT_ELF_XWORD ") contains relocations not referenced by the dynamic section",
++ pszLogName, i,
++ RTLDRELF_NAME(GetSHdrName)(pModElf, paShdrs[i].sh_name, szSecHdrNm, sizeof(szSecHdrNm)),
++ paShdrs[i].sh_type, paShdrs[i].sh_size);
++ }
+
+ return VINF_SUCCESS;
+ }
+@@ -1866,8 +2819,9 @@ static int RTLDRELF_NAME(ValidateSection
+ * @param fFlags Reserved, MBZ.
+ * @param enmArch Architecture specifier.
+ * @param phLdrMod Where to store the handle.
++ * @param pErrInfo Where to return extended error info. Optional.
+ */
+-static int RTLDRELF_NAME(Open)(PRTLDRREADER pReader, uint32_t fFlags, RTLDRARCH enmArch, PRTLDRMOD phLdrMod)
++static int RTLDRELF_NAME(Open)(PRTLDRREADER pReader, uint32_t fFlags, RTLDRARCH enmArch, PRTLDRMOD phLdrMod, PRTERRINFO pErrInfo)
+ {
+ const char *pszLogName = pReader->pfnLogName(pReader);
+ uint64_t cbRawImage = pReader->pfnSize(pReader);
+@@ -1891,21 +2845,42 @@ static int RTLDRELF_NAME(Open)(PRTLDRREA
+ #else
+ pModElf->Core.enmArch = RTLDRARCH_AMD64;
+ #endif
+- //pModElf->pvBits = NULL;
+- //pModElf->Ehdr = {0};
+- //pModElf->paShdrs = NULL;
+- //pModElf->paSyms = NULL;
+- pModElf->iSymSh = ~0U;
+- //pModElf->cSyms = 0;
+- pModElf->iStrSh = ~0U;
+- //pModElf->cbStr = 0;
+- //pModElf->cbImage = 0;
+- //pModElf->LinkAddress = 0;
+- //pModElf->pStr = NULL;
+- //pModElf->cbShStr = 0;
+- //pModElf->pShStr = NULL;
+- //pModElf->iShEhFrame = 0;
+- //pModElf->iShEhFrameHdr = 0;
++ //pModElf->pvBits = NULL;
++ //pModElf->Ehdr = {0};
++ //pModElf->paShdrs = NULL;
++ //pModElf->Rel.paSyms = NULL;
++ pModElf->Rel.iSymSh = ~0U;
++ //pModElf->Rel.cSyms = 0;
++ pModElf->Rel.iStrSh = ~0U;
++ //pModElf->Rel.cbStr = 0;
++ //pModElf->Rel.pStr = NULL;
++ //pModElf->Dyn.paSyms = NULL;
++ pModElf->Dyn.iSymSh = ~0U;
++ //pModElf->Dyn.cSyms = 0;
++ pModElf->Dyn.iStrSh = ~0U;
++ //pModElf->Dyn.cbStr = 0;
++ //pModElf->Dyn.pStr = NULL;
++ pModElf->iFirstSect = 1;
++ //pModElf->fShdrInOrder = false;
++ //pModElf->cbImage = 0;
++ pModElf->LinkAddress = ~(Elf_Addr)0;
++ //pModElf->cbShStr = 0;
++ //pModElf->pShStr = NULL;
++ //pModElf->iShEhFrame = 0;
++ //pModElf->iShEhFrameHdr= 0;
++ pModElf->iShDynamic = ~0U;
++ //pModElf->cDynamic = 0;
++ //pModElf->paDynamic = NULL;
++ //pModElf->paPhdrs = NULL;
++ pModElf->DynInfo.uPtrRelocs = ~(Elf_Addr)0;
++ //pModElf->DynInfo.cbRelocs = 0;
++ //pModElf->DynInfo.cbRelocEntry = 0;
++ //pModElf->DynInfo.uRelocType = 0;
++ //pModElf->DynInfo.idxShRelocs = 0;
++ pModElf->DynInfo.uPtrJmpRelocs = ~(Elf_Addr)0;
++ //pModElf->DynInfo.cbJmpRelocs = 0;
++ //pModElf->DynInfo.uJmpRelocType = 0;
++ //pModElf->DynInfo.idxShJmpRelocs = 0;
+
+ /*
+ * Read and validate the ELF header and match up the CPU architecture.
+@@ -1914,7 +2889,7 @@ static int RTLDRELF_NAME(Open)(PRTLDRREA
+ if (RT_SUCCESS(rc))
+ {
+ RTLDRARCH enmArchImage = RTLDRARCH_INVALID; /* shut up gcc */
+- rc = RTLDRELF_NAME(ValidateElfHeader)(&pModElf->Ehdr, pszLogName, cbRawImage, &enmArchImage);
++ rc = RTLDRELF_NAME(ValidateElfHeader)(&pModElf->Ehdr, cbRawImage, pszLogName, &enmArchImage, pErrInfo);
+ if (RT_SUCCESS(rc))
+ {
+ if ( enmArch != RTLDRARCH_WHATEVER
+@@ -1929,7 +2904,7 @@ static int RTLDRELF_NAME(Open)(PRTLDRREA
+ * introspection methods.
+ */
+ size_t const cbShdrs = pModElf->Ehdr.e_shnum * sizeof(Elf_Shdr);
+- Elf_Shdr *paShdrs = (Elf_Shdr *)RTMemAlloc(cbShdrs * 2);
++ Elf_Shdr *paShdrs = (Elf_Shdr *)RTMemAlloc(cbShdrs * 2 + sizeof(RTLDRMODELFSHX) * pModElf->Ehdr.e_shnum);
+ if (paShdrs)
+ {
+ pModElf->paShdrs = paShdrs;
+@@ -1939,111 +2914,77 @@ static int RTLDRELF_NAME(Open)(PRTLDRREA
+ memcpy(&paShdrs[pModElf->Ehdr.e_shnum], paShdrs, cbShdrs);
+ pModElf->paOrgShdrs = &paShdrs[pModElf->Ehdr.e_shnum];
+
++ pModElf->paShdrExtras = (PRTLDRMODELFSHX)&pModElf->paOrgShdrs[pModElf->Ehdr.e_shnum];
++ memset(pModElf->paShdrExtras, 0xff, sizeof(RTLDRMODELFSHX) * pModElf->Ehdr.e_shnum);
++
+ pModElf->cbShStr = paShdrs[pModElf->Ehdr.e_shstrndx].sh_size;
+
+ /*
+ * Validate the section headers and find relevant sections.
+ */
+- Elf_Addr uNextAddr = 0;
+- for (unsigned i = 0; i < pModElf->Ehdr.e_shnum; i++)
+- {
+- rc = RTLDRELF_NAME(ValidateSectionHeader)(pModElf, i, pszLogName, cbRawImage);
+- if (RT_FAILURE(rc))
+- break;
+-
+- /* We're looking for symbol tables. */
+- if (paShdrs[i].sh_type == SHT_SYMTAB)
+- {
+- if (pModElf->iSymSh != ~0U)
+- {
+- Log(("RTLdrElf: %s: Multiple symbol tabs! iSymSh=%d i=%d\n", pszLogName, pModElf->iSymSh, i));
+- rc = VERR_LDRELF_MULTIPLE_SYMTABS;
+- break;
+- }
+- pModElf->iSymSh = i;
+- pModElf->cSyms = (unsigned)(paShdrs[i].sh_size / sizeof(Elf_Sym));
+- AssertBreakStmt(pModElf->cSyms == paShdrs[i].sh_size / sizeof(Elf_Sym), rc = VERR_IMAGE_TOO_BIG);
+- pModElf->iStrSh = paShdrs[i].sh_link;
+- pModElf->cbStr = (unsigned)paShdrs[pModElf->iStrSh].sh_size;
+- AssertBreakStmt(pModElf->cbStr == paShdrs[pModElf->iStrSh].sh_size, rc = VERR_IMAGE_TOO_BIG);
+- }
+-
+- /* Special checks for the section string table. */
+- if (i == pModElf->Ehdr.e_shstrndx)
+- {
+- if (paShdrs[i].sh_type != SHT_STRTAB)
+- {
+- Log(("RTLdrElf: Section header string table is not a SHT_STRTAB: %#x\n", paShdrs[i].sh_type));
+- rc = VERR_BAD_EXE_FORMAT;
+- break;
+- }
+- if (paShdrs[i].sh_size == 0)
+- {
+- Log(("RTLdrElf: Section header string table is empty\n"));
+- rc = VERR_BAD_EXE_FORMAT;
+- break;
+- }
+- }
++ rc = RTLDRELF_NAME(ValidateAndProcessSectionHeaders)(pModElf, paShdrs, cbRawImage, pszLogName, pErrInfo);
+
+- /* Kluge for the .data..percpu segment in 64-bit linux kernels. */
+- if (paShdrs[i].sh_flags & SHF_ALLOC)
+- {
+- if ( paShdrs[i].sh_addr == 0
+- && paShdrs[i].sh_addr < uNextAddr)
+- {
+- Elf_Addr uAddr = RT_ALIGN_T(uNextAddr, paShdrs[i].sh_addralign, Elf_Addr);
+- Log(("RTLdrElf: Out of order section #%d; adjusting sh_addr from " FMT_ELF_ADDR " to " FMT_ELF_ADDR "\n",
+- i, paShdrs[i].sh_addr, uAddr));
+- paShdrs[i].sh_addr = uAddr;
+- }
+- uNextAddr = paShdrs[i].sh_addr + paShdrs[i].sh_size;
+- }
+- } /* for each section header */
++ /*
++             * Read, validate and process program headers if ET_DYN or ET_EXEC.
++ */
++ if (RT_SUCCESS(rc) && (pModElf->Ehdr.e_type == ET_DYN || pModElf->Ehdr.e_type == ET_EXEC))
++ rc = RTLDRELF_NAME(ValidateAndProcessDynamicInfo)(pModElf, cbRawImage, fFlags, pszLogName, pErrInfo);
+
+ /*
+- * Calculate the image base address if the image isn't relocatable.
++ * Massage the section headers.
+ */
+- if (RT_SUCCESS(rc) && pModElf->Ehdr.e_type != ET_REL)
++ if (RT_SUCCESS(rc))
+ {
+- pModElf->LinkAddress = ~(Elf_Addr)0;
+- for (unsigned i = 0; i < pModElf->Ehdr.e_shnum; i++)
+- if ( (paShdrs[i].sh_flags & SHF_ALLOC)
+- && paShdrs[i].sh_addr < pModElf->LinkAddress)
+- pModElf->LinkAddress = paShdrs[i].sh_addr;
+- if (pModElf->LinkAddress == ~(Elf_Addr)0)
++ if (pModElf->Ehdr.e_type == ET_REL)
+ {
+- AssertFailed();
+- rc = VERR_LDR_GENERAL_FAILURE;
+- }
+- if (pModElf->Ehdr.e_type == ET_DYN && pModElf->LinkAddress < 0x1000)
++ /* Do allocations and figure the image size: */
+ pModElf->LinkAddress = 0;
++ for (unsigned i = 1; i < pModElf->Ehdr.e_shnum; i++)
++ if (paShdrs[i].sh_flags & SHF_ALLOC)
++ {
++ paShdrs[i].sh_addr = paShdrs[i].sh_addralign
++ ? RT_ALIGN_T(pModElf->cbImage, paShdrs[i].sh_addralign, Elf_Addr)
++ : (Elf_Addr)pModElf->cbImage;
++ Elf_Addr EndAddr = paShdrs[i].sh_addr + paShdrs[i].sh_size;
++ if (pModElf->cbImage < EndAddr)
++ {
++ pModElf->cbImage = (size_t)EndAddr;
++ AssertMsgBreakStmt(pModElf->cbImage == EndAddr, (FMT_ELF_ADDR "\n", EndAddr), rc = VERR_IMAGE_TOO_BIG);
++ }
++ Log2(("RTLdrElf: %s: Assigned " FMT_ELF_ADDR " to section #%d\n", pszLogName, paShdrs[i].sh_addr, i));
++ }
++ }
++ else
++ {
++ /* Convert sh_addr to RVA: */
++ Assert(pModElf->LinkAddress != ~(Elf_Addr)0);
++ for (unsigned i = 0 /*!*/; i < pModElf->Ehdr.e_shnum; i++)
++ if (paShdrs[i].sh_flags & SHF_ALLOC)
++ paShdrs[i].sh_addr -= pModElf->LinkAddress;
++ }
+ }
+
+ /*
+- * Perform allocations / RVA calculations, determine the image size.
++ * Check if the sections are in order by address, as that will simplify
++ * enumeration and address translation.
+ */
+- if (RT_SUCCESS(rc))
+- for (unsigned i = 0; i < pModElf->Ehdr.e_shnum; i++)
+- if (paShdrs[i].sh_flags & SHF_ALLOC)
++ pModElf->fShdrInOrder = true;
++ Elf_Addr uEndAddr = 0;
++ for (unsigned i = pModElf->iFirstSect; i < pModElf->Ehdr.e_shnum; i++)
++ if (paShdrs[i].sh_flags & SHF_ALLOC)
++ {
++ if (uEndAddr <= paShdrs[i].sh_addr)
++ uEndAddr = paShdrs[i].sh_addr + paShdrs[i].sh_size;
++ else
+ {
+- if (pModElf->Ehdr.e_type == ET_REL)
+- paShdrs[i].sh_addr = paShdrs[i].sh_addralign
+- ? RT_ALIGN_T(pModElf->cbImage, paShdrs[i].sh_addralign, Elf_Addr)
+- : (Elf_Addr)pModElf->cbImage;
+- else
+- paShdrs[i].sh_addr -= pModElf->LinkAddress;
+- Elf_Addr EndAddr = paShdrs[i].sh_addr + paShdrs[i].sh_size;
+- if (pModElf->cbImage < EndAddr)
+- {
+- pModElf->cbImage = (size_t)EndAddr;
+- AssertMsgBreakStmt(pModElf->cbImage == EndAddr, (FMT_ELF_ADDR "\n", EndAddr), rc = VERR_IMAGE_TOO_BIG);
+- }
+- Log2(("RTLdrElf: %s: Assigned " FMT_ELF_ADDR " to section #%d\n", pszLogName, paShdrs[i].sh_addr, i));
++ pModElf->fShdrInOrder = false;
++ break;
+ }
++ }
+
+- Log2(("RTLdrElf: iSymSh=%u cSyms=%u iStrSh=%u cbStr=%u rc=%Rrc cbImage=%#zx LinkAddress=" FMT_ELF_ADDR "\n",
+- pModElf->iSymSh, pModElf->cSyms, pModElf->iStrSh, pModElf->cbStr, rc,
+- pModElf->cbImage, pModElf->LinkAddress));
++ Log2(("RTLdrElf: iSymSh=%u cSyms=%u iStrSh=%u cbStr=%u rc=%Rrc cbImage=%#zx LinkAddress=" FMT_ELF_ADDR " fShdrInOrder=%RTbool\n",
++ pModElf->Rel.iSymSh, pModElf->Rel.cSyms, pModElf->Rel.iStrSh, pModElf->Rel.cbStr, rc,
++ pModElf->cbImage, pModElf->LinkAddress, pModElf->fShdrInOrder));
+ if (RT_SUCCESS(rc))
+ {
+ pModElf->Core.pOps = &RTLDRELF_MID(s_rtldrElf,Ops);
+@@ -2077,6 +3018,7 @@ static int RTLDRELF_NAME(Open)(PRTLDRREA
+ #undef RTLDRELF_MID
+
+ #undef FMT_ELF_ADDR
++#undef FMT_ELF_ADDR7
+ #undef FMT_ELF_HALF
+ #undef FMT_ELF_SHALF
+ #undef FMT_ELF_OFF
+@@ -2102,6 +3044,8 @@ static int RTLDRELF_NAME(Open)(PRTLDRREA
+ #undef Elf_Size
+ #undef Elf_Sword
+ #undef Elf_Word
++#undef Elf_Xword
++#undef Elf_Sxword
+
+ #undef RTLDRMODELF
+ #undef PRTLDRMODELF
+--- a/include/iprt/memobj.h
++++ b/include/iprt/memobj.h
+@@ -127,7 +127,10 @@ RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ *                          execute code in the memory object. The user must
++ *                          use RTR0MemObjProtect after initializing the
++ * allocation to actually make it executable.
+ */
+ #define RTR0MemObjAllocPage(pMemObj, cb, fExecutable) \
+ RTR0MemObjAllocPageTag((pMemObj), (cb), (fExecutable), RTMEM_TAG)
+@@ -140,7 +143,10 @@ RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ *                          execute code in the memory object. The user must
++ *                          use RTR0MemObjProtect after initializing the
++ * allocation to actually make it executable.
+ * @param pszTag Allocation tag used for statistics and such.
+ */
+ RTR0DECL(int) RTR0MemObjAllocPageTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag);
+@@ -154,7 +160,10 @@ RTR0DECL(int) RTR0MemObjAllocPageTag(PRT
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ *                          execute code in the memory object. The user must
++ *                          use RTR0MemObjProtect after initializing the
++ * allocation to actually make it executable.
+ */
+ #define RTR0MemObjAllocLow(pMemObj, cb, fExecutable) \
+ RTR0MemObjAllocLowTag((pMemObj), (cb), (fExecutable), RTMEM_TAG)
+@@ -168,7 +177,10 @@ RTR0DECL(int) RTR0MemObjAllocPageTag(PRT
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ *                          execute code in the memory object. The user must
++ *                          use RTR0MemObjProtect after initializing the
++ * allocation to actually make it executable.
+ * @param pszTag Allocation tag used for statistics and such.
+ */
+ RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag);
+@@ -182,7 +194,10 @@ RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ *                          execute code in the memory object. The user must
++ *                          use RTR0MemObjProtect after initializing the
++ * allocation to actually make it executable.
+ */
+ #define RTR0MemObjAllocCont(pMemObj, cb, fExecutable) \
+ RTR0MemObjAllocContTag((pMemObj), (cb), (fExecutable), RTMEM_TAG)
+@@ -196,7 +211,10 @@ RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ *                          execute code in the memory object. The user must
++ *                          use RTR0MemObjProtect after initializing the
++ * allocation to actually make it executable.
+ * @param pszTag Allocation tag used for statistics and such.
+ */
+ RTR0DECL(int) RTR0MemObjAllocContTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag);
+--- a/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
++++ b/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
+@@ -38,7 +38,7 @@
+
+
+ #if (defined(RT_ARCH_AMD64) || defined(DOXYGEN_RUNNING)) && !defined(RTMEMALLOC_EXEC_HEAP)
+-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
++# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+ /**
+ * Starting with 2.6.23 we can use __get_vm_area and map_vm_area to allocate
+ * memory in the moduel range. This is preferrable to the exec heap below.
+--- a/include/VBox/sup.h
++++ b/include/VBox/sup.h
+@@ -1553,8 +1553,11 @@ SUPR3DECL(int) SUPR3GetSymbolR0(void *pv
+ *
+ * @returns VBox status code.
+ * @deprecated Use SUPR3LoadModule(pszFilename, "VMMR0.r0", &pvImageBase)
++ * @param pszFilename Full path to the VMMR0.r0 file (silly).
++ * @param pErrInfo Where to return extended error information.
++ * Optional.
+ */
+-SUPR3DECL(int) SUPR3LoadVMM(const char *pszFilename);
++SUPR3DECL(int) SUPR3LoadVMM(const char *pszFilename, PRTERRINFO pErrInfo);
+
+ /**
+ * Unloads R0 HC VMM code.
+--- a/src/VBox/Devices/Network/testcase/tstIntNet-1.cpp
++++ b/src/VBox/Devices/Network/testcase/tstIntNet-1.cpp
+@@ -846,7 +846,7 @@ extern "C" DECLEXPORT(int) TrustedMain(i
+ return 1;
+ }
+
+- rc = SUPR3LoadVMM(szAbsPath);
++ rc = SUPR3LoadVMM(szAbsPath, NULL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("tstIntNet-1: SUPR3LoadVMM(\"%s\") -> %Rrc\n", szAbsPath, rc);
+--- a/src/VBox/NetworkServices/Dhcpd/VBoxNetDhcpd.cpp
++++ b/src/VBox/NetworkServices/Dhcpd/VBoxNetDhcpd.cpp
+@@ -259,7 +259,7 @@ int VBoxNetDhcpd::vmmInit()
+ if (RT_SUCCESS(rc))
+ rc = RTPathAppend(szPathVMMR0, sizeof(szPathVMMR0), "VMMR0.r0");
+ if (RT_SUCCESS(rc))
+- rc = SUPR3LoadVMM(szPathVMMR0);
++ rc = SUPR3LoadVMM(szPathVMMR0, NULL /*pErrInfo*/);
+ return rc;
+ }
+
+--- a/src/VBox/NetworkServices/NetLib/VBoxNetBaseService.cpp
++++ b/src/VBox/NetworkServices/NetLib/VBoxNetBaseService.cpp
+@@ -383,7 +383,7 @@ int VBoxNetBaseService::tryGoOnline(void
+ return rc;
+ }
+
+- rc = SUPR3LoadVMM(strcat(szPath, "/VMMR0.r0"));
++ rc = SUPR3LoadVMM(strcat(szPath, "/VMMR0.r0"), NULL);
+ if (RT_FAILURE(rc))
+ {
+ LogRel(("VBoxNetBaseService: SUPR3LoadVMM(\"%s\") -> %Rrc\n", szPath, rc));
+--- a/src/VBox/VMM/testcase/tstGlobalConfig.cpp
++++ b/src/VBox/VMM/testcase/tstGlobalConfig.cpp
+@@ -102,7 +102,7 @@ extern "C" DECLEXPORT(int) TrustedMain(i
+ return 1;
+ }
+
+- rc = SUPR3LoadVMM("./VMMR0.r0");
++ rc = SUPR3LoadVMM("./VMMR0.r0", NULL /*pErrInfo*/);
+ if (RT_SUCCESS(rc))
+ {
+ Req.pSession = pSession;
+--- a/src/VBox/HostDrivers/Support/SUPLibLdr.cpp
++++ b/src/VBox/HostDrivers/Support/SUPLibLdr.cpp
+@@ -334,6 +334,372 @@ static DECLCALLBACK(int) supLoadModuleCr
+ }
+
+
++/** Argument package for supLoadModuleCompileSegmentsCB. */
++typedef struct SUPLDRCOMPSEGTABARGS
++{
++ uint32_t uStartRva;
++ uint32_t uEndRva;
++ uint32_t fProt;
++ uint32_t iSegs;
++ uint32_t cSegsAlloc;
++ PSUPLDRSEG paSegs;
++ PRTERRINFO pErrInfo;
++} SUPLDRCOMPSEGTABARGS, *PSUPLDRCOMPSEGTABARGS;
++
++/**
++ * @callback_method_impl{FNRTLDRENUMSEGS,
++ * Compile list of segments with the same memory protection.}
++ */
++static DECLCALLBACK(int) supLoadModuleCompileSegmentsCB(RTLDRMOD hLdrMod, PCRTLDRSEG pSeg, void *pvUser)
++{
++ PSUPLDRCOMPSEGTABARGS pArgs = (PSUPLDRCOMPSEGTABARGS)pvUser;
++ AssertCompile(RTMEM_PROT_READ == SUPLDR_PROT_READ);
++ AssertCompile(RTMEM_PROT_WRITE == SUPLDR_PROT_WRITE);
++ AssertCompile(RTMEM_PROT_EXEC == SUPLDR_PROT_EXEC);
++ RT_NOREF(hLdrMod);
++
++ Log2(("supLoadModuleCompileSegmentsCB: %RTptr/%RTptr LB %RTptr/%RTptr prot %#x %s\n",
++ pSeg->LinkAddress, pSeg->RVA, pSeg->cbMapped, pSeg->cb, pSeg->fProt, pSeg->pszName));
++
++ /* Ignore segments not part of the loaded image. */
++ if (pSeg->RVA == NIL_RTLDRADDR || pSeg->cbMapped == 0)
++ {
++ Log2(("supLoadModuleCompileSegmentsCB: -> skipped\n"));
++ return VINF_SUCCESS;
++ }
++
++ /* We currently ASSUME that all relevant segments are in ascending RVA order. */
++ AssertReturn(pSeg->RVA >= pArgs->uEndRva,
++ RTERRINFO_LOG_REL_SET_F(pArgs->pErrInfo, VERR_BAD_EXE_FORMAT, "Out of order segment: %p LB %#zx #%.*s",
++ pSeg->RVA, pSeg->cb, pSeg->cchName, pSeg->pszName));
++
++ /* We ASSUME the cbMapped field is implemented. */
++ AssertReturn(pSeg->cbMapped != NIL_RTLDRADDR, VERR_INTERNAL_ERROR_2);
++ AssertReturn(pSeg->cbMapped < _1G, VERR_INTERNAL_ERROR_4);
++ uint32_t cbMapped = (uint32_t)pSeg->cbMapped;
++ AssertReturn(pSeg->RVA < _1G, VERR_INTERNAL_ERROR_3);
++ uint32_t uRvaSeg = (uint32_t)pSeg->RVA;
++
++ /*
++ * If the protection is the same as the previous segment,
++ * just update uEndRva and continue.
++ */
++ uint32_t fProt = pSeg->fProt;
++#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
++ if (fProt & RTMEM_PROT_EXEC)
++        fProt |= RTMEM_PROT_READ;
++#endif
++    if (fProt == pArgs->fProt)
++ {
++ pArgs->uEndRva = uRvaSeg + cbMapped;
++ Log2(("supLoadModuleCompileSegmentsCB: -> merged, end %#x\n", pArgs->uEndRva));
++ return VINF_SUCCESS;
++ }
++
++ /*
++ * The protection differs, so commit current segment and start a new one.
++ * However, if the new segment and old segment share a page, this becomes
++ * a little more complicated...
++ */
++ if (pArgs->uStartRva < pArgs->uEndRva)
++ {
++ if (((pArgs->uEndRva - 1) >> PAGE_SHIFT) != (uRvaSeg >> PAGE_SHIFT))
++ {
++            /* No common page, so make the new segment start on a page boundary. */
++ cbMapped += uRvaSeg & PAGE_OFFSET_MASK;
++ uRvaSeg &= ~(uint32_t)PAGE_OFFSET_MASK;
++ Assert(pArgs->uEndRva <= uRvaSeg);
++ Log2(("supLoadModuleCompileSegmentsCB: -> new, no common\n"));
++ }
++ else if ((fProt & pArgs->fProt) == fProt)
++ {
++ /* The current segment includes the memory protections of the
++ previous, so include the common page in it: */
++ uint32_t const cbCommon = PAGE_SIZE - (uRvaSeg & PAGE_OFFSET_MASK);
++ if (cbCommon >= cbMapped)
++ {
++ pArgs->uEndRva = uRvaSeg + cbMapped;
++ Log2(("supLoadModuleCompileSegmentsCB: -> merge, %#x common, upgrading prot to %#x, end %#x\n",
++ cbCommon, pArgs->fProt, pArgs->uEndRva));
++ return VINF_SUCCESS; /* New segment was smaller than a page. */
++ }
++ cbMapped -= cbCommon;
++ uRvaSeg += cbCommon;
++ Assert(pArgs->uEndRva <= uRvaSeg);
++ Log2(("supLoadModuleCompileSegmentsCB: -> new, %#x common into previous\n", cbCommon));
++ }
++ else if ((fProt & pArgs->fProt) == pArgs->fProt)
++ {
++ /* The new segment includes the memory protections of the
++ previous, so include the common page in it: */
++ cbMapped += uRvaSeg & PAGE_OFFSET_MASK;
++ uRvaSeg &= ~(uint32_t)PAGE_OFFSET_MASK;
++ if (uRvaSeg == pArgs->uStartRva)
++ {
++ pArgs->fProt = fProt;
++ pArgs->uEndRva = uRvaSeg + cbMapped;
++ Log2(("supLoadModuleCompileSegmentsCB: -> upgrade current protection, end %#x\n", pArgs->uEndRva));
++ return VINF_SUCCESS; /* Current segment was smaller than a page. */
++ }
++ Log2(("supLoadModuleCompileSegmentsCB: -> new, %#x common into new\n", (uint32_t)(pSeg->RVA & PAGE_OFFSET_MASK)));
++ }
++ else
++ {
++ /* Create a new segment for the common page with the combined protection. */
++ Log2(("supLoadModuleCompileSegmentsCB: -> it's complicated...\n"));
++ pArgs->uEndRva &= ~(uint32_t)PAGE_OFFSET_MASK;
++ if (pArgs->uEndRva > pArgs->uStartRva)
++ {
++ Log2(("supLoadModuleCompileSegmentsCB: SUP Seg #%u: %#x LB %#x prot %#x\n",
++ pArgs->iSegs, pArgs->uStartRva, pArgs->uEndRva - pArgs->uStartRva, pArgs->fProt));
++ if (pArgs->paSegs)
++ {
++ AssertReturn(pArgs->iSegs < pArgs->cSegsAlloc, VERR_INTERNAL_ERROR_5);
++ pArgs->paSegs[pArgs->iSegs].off = pArgs->uStartRva;
++ pArgs->paSegs[pArgs->iSegs].cb = pArgs->uEndRva - pArgs->uStartRva;
++ pArgs->paSegs[pArgs->iSegs].fProt = pArgs->fProt;
++ pArgs->paSegs[pArgs->iSegs].fUnused = 0;
++ }
++ pArgs->iSegs++;
++ pArgs->uStartRva = pArgs->uEndRva;
++ }
++ pArgs->fProt |= fProt;
++
++ uint32_t const cbCommon = PAGE_SIZE - (uRvaSeg & PAGE_OFFSET_MASK);
++ if (cbCommon >= cbMapped)
++ {
++ fProt |= pArgs->fProt;
++ pArgs->uEndRva = uRvaSeg + cbMapped;
++ return VINF_SUCCESS; /* New segment was smaller than a page. */
++ }
++ cbMapped -= cbCommon;
++ uRvaSeg += cbCommon;
++ Assert(uRvaSeg - pArgs->uStartRva == PAGE_SIZE);
++ }
++
++ /* The current segment should end where the new one starts, no gaps. */
++ pArgs->uEndRva = uRvaSeg;
++
++ /* Emit the current segment */
++ Log2(("supLoadModuleCompileSegmentsCB: SUP Seg #%u: %#x LB %#x prot %#x\n",
++ pArgs->iSegs, pArgs->uStartRva, pArgs->uEndRva - pArgs->uStartRva, pArgs->fProt));
++ if (pArgs->paSegs)
++ {
++ AssertReturn(pArgs->iSegs < pArgs->cSegsAlloc, VERR_INTERNAL_ERROR_5);
++ pArgs->paSegs[pArgs->iSegs].off = pArgs->uStartRva;
++ pArgs->paSegs[pArgs->iSegs].cb = pArgs->uEndRva - pArgs->uStartRva;
++ pArgs->paSegs[pArgs->iSegs].fProt = pArgs->fProt;
++ pArgs->paSegs[pArgs->iSegs].fUnused = 0;
++ }
++ pArgs->iSegs++;
++ }
++ /* else: current segment is empty */
++
++ /* Start the new segment. */
++ Assert(!(uRvaSeg & PAGE_OFFSET_MASK));
++ pArgs->fProt = fProt;
++ pArgs->uStartRva = uRvaSeg;
++ pArgs->uEndRva = uRvaSeg + cbMapped;
++ return VINF_SUCCESS;
++}
++
++
++/**
++ * Worker for supLoadModule().
++ */
++static int supLoadModuleInner(RTLDRMOD hLdrMod, PSUPLDRLOAD pLoadReq, uint32_t cbImageWithEverything,
++ RTR0PTR uImageBase, size_t cbImage, const char *pszModule, const char *pszFilename,
++ bool fNativeLoader, bool fIsVMMR0, const char *pszSrvReqHandler,
++ uint32_t offSymTab, uint32_t cSymbols,
++ uint32_t offStrTab, size_t cbStrTab,
++ uint32_t offSegTab, uint32_t cSegments,
++ PRTERRINFO pErrInfo)
++{
++ /*
++ * Get the image bits.
++ */
++ SUPLDRRESIMPARGS Args = { pszModule, pErrInfo };
++ int rc = RTLdrGetBits(hLdrMod, &pLoadReq->u.In.abImage[0], uImageBase, supLoadModuleResolveImport, &Args);
++ if (RT_FAILURE(rc))
++ {
++ LogRel(("SUP: RTLdrGetBits failed for %s (%s). rc=%Rrc\n", pszModule, pszFilename, rc));
++ if (!RTErrInfoIsSet(pErrInfo))
++ RTErrInfoSetF(pErrInfo, rc, "RTLdrGetBits failed");
++ return rc;
++ }
++
++ /*
++ * Get the entry points.
++ */
++ RTUINTPTR VMMR0EntryFast = 0;
++ RTUINTPTR VMMR0EntryEx = 0;
++ RTUINTPTR SrvReqHandler = 0;
++ RTUINTPTR ModuleInit = 0;
++ RTUINTPTR ModuleTerm = 0;
++ const char *pszEp = NULL;
++ if (fIsVMMR0)
++ {
++ rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.abImage[0], uImageBase,
++ UINT32_MAX, pszEp = "VMMR0EntryFast", &VMMR0EntryFast);
++ if (RT_SUCCESS(rc))
++ rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.abImage[0], uImageBase,
++ UINT32_MAX, pszEp = "VMMR0EntryEx", &VMMR0EntryEx);
++ }
++ else if (pszSrvReqHandler)
++ rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.abImage[0], uImageBase,
++ UINT32_MAX, pszEp = pszSrvReqHandler, &SrvReqHandler);
++ if (RT_SUCCESS(rc))
++ {
++ int rc2 = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.abImage[0], uImageBase,
++ UINT32_MAX, pszEp = "ModuleInit", &ModuleInit);
++ if (RT_FAILURE(rc2))
++ ModuleInit = 0;
++
++ rc2 = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.abImage[0], uImageBase,
++ UINT32_MAX, pszEp = "ModuleTerm", &ModuleTerm);
++ if (RT_FAILURE(rc2))
++ ModuleTerm = 0;
++ }
++ if (RT_FAILURE(rc))
++ {
++ LogRel(("SUP: Failed to get entry point '%s' for %s (%s) rc=%Rrc\n", pszEp, pszModule, pszFilename, rc));
++ return RTErrInfoSetF(pErrInfo, rc, "Failed to resolve entry point '%s'", pszEp);
++ }
++
++ /*
++ * Create the symbol and string tables.
++ */
++ SUPLDRCREATETABSARGS CreateArgs;
++ CreateArgs.cbImage = cbImage;
++ CreateArgs.pSym = (PSUPLDRSYM)&pLoadReq->u.In.abImage[offSymTab];
++ CreateArgs.pszBase = (char *)&pLoadReq->u.In.abImage[offStrTab];
++ CreateArgs.psz = CreateArgs.pszBase;
++ rc = RTLdrEnumSymbols(hLdrMod, 0, NULL, 0, supLoadModuleCreateTabsCB, &CreateArgs);
++ if (RT_FAILURE(rc))
++ {
++ LogRel(("SUP: RTLdrEnumSymbols failed for %s (%s) rc=%Rrc\n", pszModule, pszFilename, rc));
++ return RTErrInfoSetF(pErrInfo, rc, "RTLdrEnumSymbols #2 failed");
++ }
++ AssertRelease((size_t)(CreateArgs.psz - CreateArgs.pszBase) <= cbStrTab);
++ AssertRelease((size_t)(CreateArgs.pSym - (PSUPLDRSYM)&pLoadReq->u.In.abImage[offSymTab]) <= cSymbols);
++
++ /*
++ * Create the segment table.
++ */
++ SUPLDRCOMPSEGTABARGS SegArgs;
++ SegArgs.uStartRva = 0;
++ SegArgs.uEndRva = 0;
++ SegArgs.fProt = RTMEM_PROT_READ;
++ SegArgs.iSegs = 0;
++ SegArgs.cSegsAlloc = cSegments;
++ SegArgs.paSegs = (PSUPLDRSEG)&pLoadReq->u.In.abImage[offSegTab];
++ SegArgs.pErrInfo = pErrInfo;
++ rc = RTLdrEnumSegments(hLdrMod, supLoadModuleCompileSegmentsCB, &SegArgs);
++ if (RT_FAILURE(rc))
++ {
++ LogRel(("SUP: RTLdrEnumSegments failed for %s (%s) rc=%Rrc\n", pszModule, pszFilename, rc));
++ return RTErrInfoSetF(pErrInfo, rc, "RTLdrEnumSegments #2 failed");
++ }
++ SegArgs.uEndRva = (uint32_t)cbImage;
++ AssertReturn(SegArgs.uEndRva == cbImage, VERR_OUT_OF_RANGE);
++ if (SegArgs.uEndRva > SegArgs.uStartRva)
++ {
++ SegArgs.paSegs[SegArgs.iSegs].off = SegArgs.uStartRva;
++ SegArgs.paSegs[SegArgs.iSegs].cb = SegArgs.uEndRva - SegArgs.uStartRva;
++ SegArgs.paSegs[SegArgs.iSegs].fProt = SegArgs.fProt;
++ SegArgs.paSegs[SegArgs.iSegs].fUnused = 0;
++ SegArgs.iSegs++;
++ }
++ for (uint32_t i = 0; i < SegArgs.iSegs; i++)
++ LogRel(("SUP: seg #%u: %c%c%c %#010RX32 LB %#010RX32\n", i, /** @todo LogRel2 */
++ SegArgs.paSegs[i].fProt & SUPLDR_PROT_READ ? 'R' : ' ',
++ SegArgs.paSegs[i].fProt & SUPLDR_PROT_WRITE ? 'W' : ' ',
++ SegArgs.paSegs[i].fProt & SUPLDR_PROT_EXEC ? 'X' : ' ',
++ SegArgs.paSegs[i].off, SegArgs.paSegs[i].cb));
++ AssertRelease(SegArgs.iSegs == cSegments);
++ AssertRelease(SegArgs.cSegsAlloc == cSegments);
++
++ /*
++ * Upload the image.
++ */
++ pLoadReq->Hdr.u32Cookie = g_u32Cookie;
++ pLoadReq->Hdr.u32SessionCookie = g_u32SessionCookie;
++ pLoadReq->Hdr.cbIn = SUP_IOCTL_LDR_LOAD_SIZE_IN(cbImageWithEverything);
++ pLoadReq->Hdr.cbOut = SUP_IOCTL_LDR_LOAD_SIZE_OUT;
++ pLoadReq->Hdr.fFlags = SUPREQHDR_FLAGS_MAGIC | SUPREQHDR_FLAGS_EXTRA_IN;
++ pLoadReq->Hdr.rc = VERR_INTERNAL_ERROR;
++
++ pLoadReq->u.In.pfnModuleInit = (RTR0PTR)ModuleInit;
++ pLoadReq->u.In.pfnModuleTerm = (RTR0PTR)ModuleTerm;
++ if (fIsVMMR0)
++ {
++ pLoadReq->u.In.eEPType = SUPLDRLOADEP_VMMR0;
++ pLoadReq->u.In.EP.VMMR0.pvVMMR0 = uImageBase;
++ pLoadReq->u.In.EP.VMMR0.pvVMMR0EntryFast= (RTR0PTR)VMMR0EntryFast;
++ pLoadReq->u.In.EP.VMMR0.pvVMMR0EntryEx = (RTR0PTR)VMMR0EntryEx;
++ }
++ else if (pszSrvReqHandler)
++ {
++ pLoadReq->u.In.eEPType = SUPLDRLOADEP_SERVICE;
++ pLoadReq->u.In.EP.Service.pfnServiceReq = (RTR0PTR)SrvReqHandler;
++ pLoadReq->u.In.EP.Service.apvReserved[0] = NIL_RTR0PTR;
++ pLoadReq->u.In.EP.Service.apvReserved[1] = NIL_RTR0PTR;
++ pLoadReq->u.In.EP.Service.apvReserved[2] = NIL_RTR0PTR;
++ }
++ else
++ pLoadReq->u.In.eEPType = SUPLDRLOADEP_NOTHING;
++ pLoadReq->u.In.offStrTab = offStrTab;
++ pLoadReq->u.In.cbStrTab = (uint32_t)cbStrTab;
++ AssertRelease(pLoadReq->u.In.cbStrTab == cbStrTab);
++ pLoadReq->u.In.cbImageBits = (uint32_t)cbImage;
++ pLoadReq->u.In.offSymbols = offSymTab;
++ pLoadReq->u.In.cSymbols = cSymbols;
++ pLoadReq->u.In.offSegments = offSegTab;
++ pLoadReq->u.In.cSegments = cSegments;
++ pLoadReq->u.In.cbImageWithEverything = cbImageWithEverything;
++ pLoadReq->u.In.pvImageBase = uImageBase;
++ if (!g_uSupFakeMode)
++ {
++ rc = suplibOsIOCtl(&g_supLibData, SUP_IOCTL_LDR_LOAD, pLoadReq, SUP_IOCTL_LDR_LOAD_SIZE(cbImageWithEverything));
++ if (RT_SUCCESS(rc))
++ rc = pLoadReq->Hdr.rc;
++ else
++ LogRel(("SUP: SUP_IOCTL_LDR_LOAD ioctl for %s (%s) failed rc=%Rrc\n", pszModule, pszFilename, rc));
++ }
++ else
++ rc = VINF_SUCCESS;
++ if ( RT_SUCCESS(rc)
++ || rc == VERR_ALREADY_LOADED /* A competing process. */
++ )
++ {
++ LogRel(("SUP: Loaded %s (%s) at %#RKv - ModuleInit at %RKv and ModuleTerm at %RKv%s\n",
++ pszModule, pszFilename, uImageBase, (RTR0PTR)ModuleInit, (RTR0PTR)ModuleTerm,
++ fNativeLoader ? " using the native ring-0 loader" : ""));
++ if (fIsVMMR0)
++ {
++ g_pvVMMR0 = uImageBase;
++ LogRel(("SUP: VMMR0EntryEx located at %RKv and VMMR0EntryFast at %RKv\n", (RTR0PTR)VMMR0EntryEx, (RTR0PTR)VMMR0EntryFast));
++ }
++#ifdef RT_OS_WINDOWS
++ LogRel(("SUP: windbg> .reload /f %s=%#RKv\n", pszFilename, uImageBase));
++#endif
++ return VINF_SUCCESS;
++ }
++
++ /*
++ * Failed, bail out.
++ */
++ LogRel(("SUP: Loading failed for %s (%s) rc=%Rrc\n", pszModule, pszFilename, rc));
++ if ( pLoadReq->u.Out.uErrorMagic == SUPLDRLOAD_ERROR_MAGIC
++ && pLoadReq->u.Out.szError[0] != '\0')
++ {
++ LogRel(("SUP: %s\n", pLoadReq->u.Out.szError));
++ return RTErrInfoSet(pErrInfo, rc, pLoadReq->u.Out.szError);
++ }
++ return RTErrInfoSet(pErrInfo, rc, "SUP_IOCTL_LDR_LOAD failed");
++}
++
++
+ /**
+ * Worker for SUPR3LoadModule().
+ *
+@@ -356,6 +722,7 @@ static int supLoadModule(const char *psz
+ AssertPtrReturn(pszFilename, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(pszModule, VERR_INVALID_PARAMETER);
+ AssertPtrReturn(ppvImageBase, VERR_INVALID_PARAMETER);
++ /** @todo abspath it right into SUPLDROPEN */
+ AssertReturn(strlen(pszModule) < RT_SIZEOFMEMB(SUPLDROPEN, u.In.szName), VERR_FILENAME_TOO_LONG);
+ char szAbsFilename[RT_SIZEOFMEMB(SUPLDROPEN, u.In.szFilename)];
+ rc = RTPathAbs(pszFilename, szAbsFilename, sizeof(szAbsFilename));
+@@ -371,8 +738,8 @@ static int supLoadModule(const char *psz
+ * Open image file and figure its size.
+ */
+ RTLDRMOD hLdrMod;
+- rc = RTLdrOpen(pszFilename, 0, RTLDRARCH_HOST, &hLdrMod);
+- if (!RT_SUCCESS(rc))
++ rc = RTLdrOpenEx(pszFilename, 0 /*fFlags*/, RTLDRARCH_HOST, &hLdrMod, pErrInfo);
++ if (RT_FAILURE(rc))
+ {
+ LogRel(("SUP: RTLdrOpen failed for %s (%s) %Rrc\n", pszModule, pszFilename, rc));
+ return rc;
+@@ -385,230 +752,109 @@ static int supLoadModule(const char *psz
+ rc = RTLdrEnumSymbols(hLdrMod, 0, NULL, 0, supLoadModuleCalcSizeCB, &CalcArgs);
+ if (RT_SUCCESS(rc))
+ {
+- const uint32_t offSymTab = RT_ALIGN_32(CalcArgs.cbImage, 8);
+- const uint32_t offStrTab = offSymTab + CalcArgs.cSymbols * sizeof(SUPLDRSYM);
+- const uint32_t cbImageWithTabs = RT_ALIGN_32(offStrTab + CalcArgs.cbStrings, 8);
+-
+ /*
+- * Open the R0 image.
++ * Figure out the number of segments needed first.
+ */
+- SUPLDROPEN OpenReq;
+- OpenReq.Hdr.u32Cookie = g_u32Cookie;
+- OpenReq.Hdr.u32SessionCookie = g_u32SessionCookie;
+- OpenReq.Hdr.cbIn = SUP_IOCTL_LDR_OPEN_SIZE_IN;
+- OpenReq.Hdr.cbOut = SUP_IOCTL_LDR_OPEN_SIZE_OUT;
+- OpenReq.Hdr.fFlags = SUPREQHDR_FLAGS_DEFAULT;
+- OpenReq.Hdr.rc = VERR_INTERNAL_ERROR;
+- OpenReq.u.In.cbImageWithTabs = cbImageWithTabs;
+- OpenReq.u.In.cbImageBits = (uint32_t)CalcArgs.cbImage;
+- strcpy(OpenReq.u.In.szName, pszModule);
+- strcpy(OpenReq.u.In.szFilename, pszFilename);
+- if (!g_uSupFakeMode)
+- {
+- rc = suplibOsIOCtl(&g_supLibData, SUP_IOCTL_LDR_OPEN, &OpenReq, SUP_IOCTL_LDR_OPEN_SIZE);
+- if (RT_SUCCESS(rc))
+- rc = OpenReq.Hdr.rc;
+- }
+- else
+- {
+- OpenReq.u.Out.fNeedsLoading = true;
+- OpenReq.u.Out.pvImageBase = 0xef423420;
+- }
+- *ppvImageBase = (void *)OpenReq.u.Out.pvImageBase;
+- if ( RT_SUCCESS(rc)
+- && OpenReq.u.Out.fNeedsLoading)
++ SUPLDRCOMPSEGTABARGS SegArgs;
++ SegArgs.uStartRva = 0;
++ SegArgs.uEndRva = 0;
++ SegArgs.fProt = RTMEM_PROT_READ;
++ SegArgs.iSegs = 0;
++ SegArgs.cSegsAlloc = 0;
++ SegArgs.paSegs = NULL;
++ SegArgs.pErrInfo = pErrInfo;
++ rc = RTLdrEnumSegments(hLdrMod, supLoadModuleCompileSegmentsCB, &SegArgs);
++ if (RT_SUCCESS(rc))
+ {
++ Assert(SegArgs.uEndRva <= RTLdrSize(hLdrMod));
++ SegArgs.uEndRva = (uint32_t)CalcArgs.cbImage; /* overflow is checked later */
++ if (SegArgs.uEndRva > SegArgs.uStartRva)
++ {
++ Log2(("supLoadModule: SUP Seg #%u: %#x LB %#x prot %#x\n",
++ SegArgs.iSegs, SegArgs.uStartRva, SegArgs.uEndRva - SegArgs.uStartRva, SegArgs.fProt));
++ SegArgs.iSegs++;
++ }
++
++ const uint32_t offSymTab = RT_ALIGN_32(CalcArgs.cbImage, 8);
++ const uint32_t offStrTab = offSymTab + CalcArgs.cSymbols * sizeof(SUPLDRSYM);
++ const uint32_t offSegTab = RT_ALIGN_32(offStrTab + CalcArgs.cbStrings, 8);
++ const uint32_t cbImageWithEverything = RT_ALIGN_32(offSegTab + sizeof(SUPLDRSEG) * SegArgs.iSegs, 8);
++
+ /*
+- * We need to load it.
+- * Allocate memory for the image bits.
++ * Open the R0 image.
+ */
+- PSUPLDRLOAD pLoadReq = (PSUPLDRLOAD)RTMemTmpAlloc(SUP_IOCTL_LDR_LOAD_SIZE(cbImageWithTabs));
+- if (pLoadReq)
++ SUPLDROPEN OpenReq;
++ OpenReq.Hdr.u32Cookie = g_u32Cookie;
++ OpenReq.Hdr.u32SessionCookie = g_u32SessionCookie;
++ OpenReq.Hdr.cbIn = SUP_IOCTL_LDR_OPEN_SIZE_IN;
++ OpenReq.Hdr.cbOut = SUP_IOCTL_LDR_OPEN_SIZE_OUT;
++ OpenReq.Hdr.fFlags = SUPREQHDR_FLAGS_DEFAULT;
++ OpenReq.Hdr.rc = VERR_INTERNAL_ERROR;
++ OpenReq.u.In.cbImageWithEverything = cbImageWithEverything;
++ OpenReq.u.In.cbImageBits = (uint32_t)CalcArgs.cbImage;
++ strcpy(OpenReq.u.In.szName, pszModule);
++ strcpy(OpenReq.u.In.szFilename, pszFilename);
++ if (!g_uSupFakeMode)
++ {
++ rc = suplibOsIOCtl(&g_supLibData, SUP_IOCTL_LDR_OPEN, &OpenReq, SUP_IOCTL_LDR_OPEN_SIZE);
++ if (RT_SUCCESS(rc))
++ rc = OpenReq.Hdr.rc;
++ }
++ else
++ {
++ OpenReq.u.Out.fNeedsLoading = true;
++ OpenReq.u.Out.pvImageBase = 0xef423420;
++ }
++ *ppvImageBase = (void *)OpenReq.u.Out.pvImageBase;
++ if ( RT_SUCCESS(rc)
++ && OpenReq.u.Out.fNeedsLoading)
+ {
+ /*
+- * Get the image bits.
++ * We need to load it.
++ *
++ * Allocate the request and pass it to an inner work function
++ * that populates it and sends it off to the driver.
+ */
+-
+- SUPLDRRESIMPARGS Args = { pszModule, pErrInfo };
+- rc = RTLdrGetBits(hLdrMod, &pLoadReq->u.In.abImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase,
+- supLoadModuleResolveImport, &Args);
+-
+- if (RT_SUCCESS(rc))
++ const uint32_t cbLoadReq = SUP_IOCTL_LDR_LOAD_SIZE(cbImageWithEverything);
++ PSUPLDRLOAD pLoadReq = (PSUPLDRLOAD)RTMemTmpAlloc(cbLoadReq);
++ if (pLoadReq)
+ {
+- /*
+- * Get the entry points.
+- */
+- RTUINTPTR VMMR0EntryFast = 0;
+- RTUINTPTR VMMR0EntryEx = 0;
+- RTUINTPTR SrvReqHandler = 0;
+- RTUINTPTR ModuleInit = 0;
+- RTUINTPTR ModuleTerm = 0;
+- const char *pszEp = NULL;
+- if (fIsVMMR0)
+- {
+- rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.abImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase,
+- UINT32_MAX, pszEp = "VMMR0EntryFast", &VMMR0EntryFast);
+- if (RT_SUCCESS(rc))
+- rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.abImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase,
+- UINT32_MAX, pszEp = "VMMR0EntryEx", &VMMR0EntryEx);
+- }
+- else if (pszSrvReqHandler)
+- rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.abImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase,
+- UINT32_MAX, pszEp = pszSrvReqHandler, &SrvReqHandler);
+- if (RT_SUCCESS(rc))
+- {
+- int rc2 = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.abImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase,
+- UINT32_MAX, pszEp = "ModuleInit", &ModuleInit);
+- if (RT_FAILURE(rc2))
+- ModuleInit = 0;
+-
+- rc2 = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.abImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase,
+- UINT32_MAX, pszEp = "ModuleTerm", &ModuleTerm);
+- if (RT_FAILURE(rc2))
+- ModuleTerm = 0;
+- }
+- if (RT_SUCCESS(rc))
+- {
+- /*
+- * Create the symbol and string tables.
+- */
+- SUPLDRCREATETABSARGS CreateArgs;
+- CreateArgs.cbImage = CalcArgs.cbImage;
+- CreateArgs.pSym = (PSUPLDRSYM)&pLoadReq->u.In.abImage[offSymTab];
+- CreateArgs.pszBase = (char *)&pLoadReq->u.In.abImage[offStrTab];
+- CreateArgs.psz = CreateArgs.pszBase;
+- rc = RTLdrEnumSymbols(hLdrMod, 0, NULL, 0, supLoadModuleCreateTabsCB, &CreateArgs);
+- if (RT_SUCCESS(rc))
+- {
+- AssertRelease((size_t)(CreateArgs.psz - CreateArgs.pszBase) <= CalcArgs.cbStrings);
+- AssertRelease((size_t)(CreateArgs.pSym - (PSUPLDRSYM)&pLoadReq->u.In.abImage[offSymTab]) <= CalcArgs.cSymbols);
+-
+- /*
+- * Upload the image.
+- */
+- pLoadReq->Hdr.u32Cookie = g_u32Cookie;
+- pLoadReq->Hdr.u32SessionCookie = g_u32SessionCookie;
+- pLoadReq->Hdr.cbIn = SUP_IOCTL_LDR_LOAD_SIZE_IN(cbImageWithTabs);
+- pLoadReq->Hdr.cbOut = SUP_IOCTL_LDR_LOAD_SIZE_OUT;
+- pLoadReq->Hdr.fFlags = SUPREQHDR_FLAGS_MAGIC | SUPREQHDR_FLAGS_EXTRA_IN;
+- pLoadReq->Hdr.rc = VERR_INTERNAL_ERROR;
+-
+- pLoadReq->u.In.pfnModuleInit = (RTR0PTR)ModuleInit;
+- pLoadReq->u.In.pfnModuleTerm = (RTR0PTR)ModuleTerm;
+- if (fIsVMMR0)
+- {
+- pLoadReq->u.In.eEPType = SUPLDRLOADEP_VMMR0;
+- pLoadReq->u.In.EP.VMMR0.pvVMMR0 = OpenReq.u.Out.pvImageBase;
+- pLoadReq->u.In.EP.VMMR0.pvVMMR0EntryFast= (RTR0PTR)VMMR0EntryFast;
+- pLoadReq->u.In.EP.VMMR0.pvVMMR0EntryEx = (RTR0PTR)VMMR0EntryEx;
+- }
+- else if (pszSrvReqHandler)
+- {
+- pLoadReq->u.In.eEPType = SUPLDRLOADEP_SERVICE;
+- pLoadReq->u.In.EP.Service.pfnServiceReq = (RTR0PTR)SrvReqHandler;
+- pLoadReq->u.In.EP.Service.apvReserved[0] = NIL_RTR0PTR;
+- pLoadReq->u.In.EP.Service.apvReserved[1] = NIL_RTR0PTR;
+- pLoadReq->u.In.EP.Service.apvReserved[2] = NIL_RTR0PTR;
+- }
+- else
+- pLoadReq->u.In.eEPType = SUPLDRLOADEP_NOTHING;
+- pLoadReq->u.In.offStrTab = offStrTab;
+- pLoadReq->u.In.cbStrTab = (uint32_t)CalcArgs.cbStrings;
+- AssertRelease(pLoadReq->u.In.cbStrTab == CalcArgs.cbStrings);
+- pLoadReq->u.In.cbImageBits = (uint32_t)CalcArgs.cbImage;
+- pLoadReq->u.In.offSymbols = offSymTab;
+- pLoadReq->u.In.cSymbols = CalcArgs.cSymbols;
+- pLoadReq->u.In.cbImageWithTabs = cbImageWithTabs;
+- pLoadReq->u.In.pvImageBase = OpenReq.u.Out.pvImageBase;
+- if (!g_uSupFakeMode)
+- {
+- rc = suplibOsIOCtl(&g_supLibData, SUP_IOCTL_LDR_LOAD, pLoadReq, SUP_IOCTL_LDR_LOAD_SIZE(cbImageWithTabs));
+- if (RT_SUCCESS(rc))
+- rc = pLoadReq->Hdr.rc;
+- else
+- LogRel(("SUP: SUP_IOCTL_LDR_LOAD ioctl for %s (%s) failed rc=%Rrc\n", pszModule, pszFilename, rc));
+- }
+- else
+- rc = VINF_SUCCESS;
+- if ( RT_SUCCESS(rc)
+- || rc == VERR_ALREADY_LOADED /* A competing process. */
+- )
+- {
+- LogRel(("SUP: Loaded %s (%s) at %#RKv - ModuleInit at %RKv and ModuleTerm at %RKv%s\n",
+- pszModule, pszFilename, OpenReq.u.Out.pvImageBase, (RTR0PTR)ModuleInit, (RTR0PTR)ModuleTerm,
+- OpenReq.u.Out.fNativeLoader ? " using the native ring-0 loader" : ""));
+- if (fIsVMMR0)
+- {
+- g_pvVMMR0 = OpenReq.u.Out.pvImageBase;
+- LogRel(("SUP: VMMR0EntryEx located at %RKv and VMMR0EntryFast at %RKv\n", (RTR0PTR)VMMR0EntryEx, (RTR0PTR)VMMR0EntryFast));
+- }
+-#ifdef RT_OS_WINDOWS
+- LogRel(("SUP: windbg> .reload /f %s=%#RKv\n", pszFilename, OpenReq.u.Out.pvImageBase));
+-#endif
+-
+- RTMemTmpFree(pLoadReq);
+- RTLdrClose(hLdrMod);
+- return VINF_SUCCESS;
+- }
+-
+- /*
+- * Failed, bail out.
+- */
+- LogRel(("SUP: Loading failed for %s (%s) rc=%Rrc\n", pszModule, pszFilename, rc));
+- if ( pLoadReq->u.Out.uErrorMagic == SUPLDRLOAD_ERROR_MAGIC
+- && pLoadReq->u.Out.szError[0] != '\0')
+- {
+- LogRel(("SUP: %s\n", pLoadReq->u.Out.szError));
+- RTErrInfoSet(pErrInfo, rc, pLoadReq->u.Out.szError);
+- }
+- else
+- RTErrInfoSet(pErrInfo, rc, "SUP_IOCTL_LDR_LOAD failed");
+- }
+- else
+- {
+- LogRel(("SUP: RTLdrEnumSymbols failed for %s (%s) rc=%Rrc\n", pszModule, pszFilename, rc));
+- RTErrInfoSetF(pErrInfo, rc, "RTLdrEnumSymbols #2 failed");
+- }
+- }
+- else
+- {
+- LogRel(("SUP: Failed to get entry point '%s' for %s (%s) rc=%Rrc\n", pszEp, pszModule, pszFilename, rc));
+- RTErrInfoSetF(pErrInfo, rc, "Failed to resolve entry point '%s'", pszEp);
+- }
++ rc = supLoadModuleInner(hLdrMod, pLoadReq, cbImageWithEverything, OpenReq.u.Out.pvImageBase, CalcArgs.cbImage,
++ pszModule, pszFilename, OpenReq.u.Out.fNativeLoader, fIsVMMR0, pszSrvReqHandler,
++ offSymTab, CalcArgs.cSymbols,
++ offStrTab, CalcArgs.cbStrings,
++ offSegTab, SegArgs.iSegs,
++ pErrInfo);
++ RTMemTmpFree(pLoadReq);
+ }
+ else
+ {
+- LogRel(("SUP: RTLdrGetBits failed for %s (%s). rc=%Rrc\n", pszModule, pszFilename, rc));
+- if (!RTErrInfoIsSet(pErrInfo))
+- RTErrInfoSetF(pErrInfo, rc, "RTLdrGetBits failed");
++ AssertMsgFailed(("failed to allocated %u bytes for SUPLDRLOAD_IN structure!\n", SUP_IOCTL_LDR_LOAD_SIZE(cbImageWithEverything)));
++ rc = RTErrInfoSetF(pErrInfo, VERR_NO_TMP_MEMORY, "Failed to allocate %u bytes for the load request",
++ SUP_IOCTL_LDR_LOAD_SIZE(cbImageWithEverything));
+ }
+- RTMemTmpFree(pLoadReq);
+ }
+- else
++ /*
++ * Already loaded?
++ */
++ else if (RT_SUCCESS(rc))
+ {
+- AssertMsgFailed(("failed to allocated %u bytes for SUPLDRLOAD_IN structure!\n", SUP_IOCTL_LDR_LOAD_SIZE(cbImageWithTabs)));
+- rc = VERR_NO_TMP_MEMORY;
+- RTErrInfoSetF(pErrInfo, rc, "Failed to allocate %u bytes for the load request", SUP_IOCTL_LDR_LOAD_SIZE(cbImageWithTabs));
+- }
+- }
+- /*
+- * Already loaded?
+- */
+- else if (RT_SUCCESS(rc))
+- {
+- if (fIsVMMR0)
+- g_pvVMMR0 = OpenReq.u.Out.pvImageBase;
+- LogRel(("SUP: Opened %s (%s) at %#RKv%s.\n", pszModule, pszFilename, OpenReq.u.Out.pvImageBase,
+- OpenReq.u.Out.fNativeLoader ? " loaded by the native ring-0 loader" : ""));
++ if (fIsVMMR0)
++ g_pvVMMR0 = OpenReq.u.Out.pvImageBase;
++ LogRel(("SUP: Opened %s (%s) at %#RKv%s.\n", pszModule, pszFilename, OpenReq.u.Out.pvImageBase,
++ OpenReq.u.Out.fNativeLoader ? " loaded by the native ring-0 loader" : ""));
+ #ifdef RT_OS_WINDOWS
+- LogRel(("SUP: windbg> .reload /f %s=%#RKv\n", pszFilename, OpenReq.u.Out.pvImageBase));
++ LogRel(("SUP: windbg> .reload /f %s=%#RKv\n", pszFilename, OpenReq.u.Out.pvImageBase));
+ #endif
++ }
++ /*
++ * No, failed.
++ */
++ else
++ RTErrInfoSet(pErrInfo, rc, "SUP_IOCTL_LDR_OPEN failed");
+ }
+- /*
+- * No, failed.
+- */
+- else
+- RTErrInfoSet(pErrInfo, rc, "SUP_IOCTL_LDR_OPEN failed");
++ else if (!RTErrInfoIsSet(pErrInfo) && pErrInfo)
++ RTErrInfoSetF(pErrInfo, rc, "RTLdrEnumSegments #1 failed");
+ }
+ else
+ RTErrInfoSetF(pErrInfo, rc, "RTLdrEnumSymbols #1 failed");
+@@ -682,10 +928,10 @@ SUPR3DECL(int) SUPR3GetSymbolR0(void *pv
+ }
+
+
+-SUPR3DECL(int) SUPR3LoadVMM(const char *pszFilename)
++SUPR3DECL(int) SUPR3LoadVMM(const char *pszFilename, PRTERRINFO pErrInfo)
+ {
+ void *pvImageBase;
+- return SUPR3LoadModule(pszFilename, "VMMR0.r0", &pvImageBase, NULL /*pErrInfo*/);
++ return SUPR3LoadModule(pszFilename, "VMMR0.r0", &pvImageBase, pErrInfo);
+ }
+
+
+--- a/src/VBox/HostDrivers/Support/testcase/tstInt.cpp
++++ b/src/VBox/HostDrivers/Support/testcase/tstInt.cpp
+@@ -76,7 +76,8 @@ int main(int argc, char **argv)
+ /*
+ * Load VMM code.
+ */
+- rc = SUPR3LoadVMM(szAbsFile);
++ RTERRINFOSTATIC ErrInfo;
++ rc = SUPR3LoadVMM(szAbsFile, RTErrInfoInitStatic(&ErrInfo));
+ if (RT_SUCCESS(rc))
+ {
+ /*
+@@ -208,7 +209,7 @@ int main(int argc, char **argv)
+ }
+ else
+ {
+- RTPrintf("tstInt: SUPR3LoadVMM failed with rc=%Rrc\n", rc);
++ RTPrintf("tstInt: SUPR3LoadVMM failed with rc=%Rrc%#RTeim\n", rc, &ErrInfo.Core);
+ rcRet++;
+ }
+
+--- a/src/VBox/Devices/Makefile.kmk
++++ b/src/VBox/Devices/Makefile.kmk
+@@ -52,7 +52,7 @@ if !defined(VBOX_ONLY_EXTPACKS)
+ if1of ($(KBUILD_TARGET_ARCH), $(VBOX_SUPPORTED_HOST_ARCHS))
+ LIBRARIES += ServicesR0
+ DLLS += VBoxDDU VBoxDD VBoxDD2
+- SYSMODS += VBoxDDR0
++ $(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += VBoxDDR0
+ ifdef VBOX_WITH_RAW_MODE
+ SYSMODS += VBoxDDRC
+ endif
+@@ -1370,7 +1370,7 @@ if defined(VBOX_WITH_EXTPACK) && defined
+ USB/DevXHCI.cpp
+ $(call VBOX_SET_VER_INFO_DLL,VBoxEhciR3,PUEL Extension Pack - EHCI Device)
+
+- SYSMODS += VBoxEhciR0
++ $(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += VBoxEhciR0
+ VBoxEhciR0_TEMPLATE = VBoxR0ExtPackPuel
+ VBoxEhciR0_SOURCES = \
+ USB/DevEHCI.cpp \
+@@ -1406,7 +1406,7 @@ if defined(VBOX_WITH_EXTPACK) && defined
+ VBoxPciRawDrv_SOURCES = Bus/DrvPciRaw.cpp
+ $(call VBOX_SET_VER_INFO_DLL,VBoxPciRawDrv,PUEL Extension Pack - PCI Passthrough Driver)
+
+- SYSMODS += VBoxPciRawR0
++ $(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += VBoxPciRawR0
+ VBoxPciRawR0_TEMPLATE = VBoxR0ExtPackPuel
+ VBoxPciRawR0_SOURCES = Bus/DevPciRaw.cpp
+ $(call VBOX_SET_VER_INFO_R0,VBoxPciRawR0,PUEL Extension Pack - PCI Passthrough Driver$(COMMA) ring-0)
+@@ -1424,7 +1424,7 @@ if defined(VBOX_WITH_EXTPACK) && defined
+ Storage/DevNVMe.cpp
+ $(call VBOX_SET_VER_INFO_DLL,VBoxNvmeR3,PUEL Extension Pack - NVMe Device)
+
+- SYSMODS += VBoxNvmeR0
++ $(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += VBoxNvmeR0
+ VBoxNvmeR0_TEMPLATE = VBoxR0ExtPackPuel
+ VBoxNvmeR0_SOURCES = \
+ Storage/DevNVMe.cpp
+--- a/src/VBox/ExtPacks/VBoxDTrace/Makefile.kmk
++++ b/src/VBox/ExtPacks/VBoxDTrace/Makefile.kmk
+@@ -242,7 +242,7 @@ if defined(VBOX_WITH_EXTPACK_VBOXDTRACE)
+ # The ring-0 part of VBoxDTrace.
+ #
+ ifneq ($(KBUILD_TARGET),solaris) # disabled on solaris - neiter needed nor currently able to build it here.
+- SYSMODS += VBoxDTraceR0
++ $(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += VBoxDTraceR0
+ endif
+ VBoxDTraceR0_TEMPLATE = VBoxR0ExtPackDTrace
+ VBoxDTraceR0_DEFS = IN_VBOXDTRACE_R0 IN_RT_R0
+--- a/src/VBox/ExtPacks/BusMouseSample/Makefile.kmk
++++ b/src/VBox/ExtPacks/BusMouseSample/Makefile.kmk
+@@ -83,7 +83,7 @@ DLLS += VBoxBusMouseR3
+ VBoxBusMouseR3_TEMPLATE = VBoxR3ExtPackBusMouse
+ VBoxBusMouseR3_SOURCES = DevBusMouse.cpp
+
+-SYSMODS += VBoxBusMouseR0
++$(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += VBoxBusMouseR0
+ VBoxBusMouseR0_TEMPLATE = VBoxR0ExtPackBusMouse
+ VBoxBusMouseR0_SOURCES = DevBusMouse.cpp
+
+--- a/src/VBox/Runtime/testcase/Makefile.kmk
++++ b/src/VBox/Runtime/testcase/Makefile.kmk
+@@ -210,13 +210,13 @@ if1of ($(KBUILD_TARGET_ARCH), amd64 x86)
+ tstRTR0ThreadDriver
+ endif
+ if1of ($(KBUILD_TARGET_ARCH), $(VBOX_SUPPORTED_HOST_ARCHS))
+- SYSMODS += \
++ $(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += \
+ tstLdrObjR0
+ ifdef VBOX_WITH_RAW_MODE
+ SYSMODS += tstLdrObj
+ endif
+ endif
+- SYSMODS += \
++ $(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += \
+ tstRTR0MemUserKernel \
+ tstRTR0SemMutex \
+ tstRTR0Timer \
+@@ -224,7 +224,7 @@ if1of ($(KBUILD_TARGET_ARCH), amd64 x86)
+ tstRTR0Thread
+ if1of ($(KBUILD_TARGET), solaris darwin)
+ PROGRAMS += tstRTR0DbgKrnlInfoDriver
+- SYSMODS += tstRTR0DbgKrnlInfo
++ $(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += tstRTR0DbgKrnlInfo
+ endif # VBOX_SUPPORTED_HOST_ARCHS only
+
+ endif
+--- a/src/VBox/VMM/Makefile.kmk
++++ b/src/VBox/VMM/Makefile.kmk
+@@ -435,7 +435,7 @@ ifndef VBOX_ONLY_EXTPACKS
+ #
+ # VMMR0.r0
+ #
+-SYSMODS += VMMR0
++$(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += VMMR0
+ VMMR0_TEMPLATE = VBoxR0
+ VMMR0_SYSSUFF = .r0
+
+--- a/src/VBox/ValidationKit/utils/misc/Makefile.kmk
++++ b/src/VBox/ValidationKit/utils/misc/Makefile.kmk
+@@ -31,7 +31,7 @@ PROGRAMS += LoadGenerator
+ LoadGenerator_TEMPLATE = VBoxValidationKitR3Host
+ LoadGenerator_SOURCES = loadgenerator.cpp
+
+-SYSMODS += loadgeneratorR0
++$(if-expr defined(VBOX_WITH_VBOXR0_AS_DLL),DLLS,SYSMODS) += loadgeneratorR0
+ loadgeneratorR0_TEMPLATE = VBoxValidationKitR0
+ loadgeneratorR0_SOURCES = loadgeneratorR0.cpp
+
+--- a/src/VBox/HostDrivers/Support/SUPLib.cpp
++++ b/src/VBox/HostDrivers/Support/SUPLib.cpp
+@@ -275,9 +275,9 @@ SUPR3DECL(int) SUPR3InitEx(bool fUnrestr
+ CookieReq.Hdr.rc = VERR_INTERNAL_ERROR;
+ strcpy(CookieReq.u.In.szMagic, SUPCOOKIE_MAGIC);
+ CookieReq.u.In.u32ReqVersion = SUPDRV_IOC_VERSION;
+- const uint32_t uMinVersion = (SUPDRV_IOC_VERSION & 0xffff0000) == 0x002d0000
++ const uint32_t uMinVersion = /*(SUPDRV_IOC_VERSION & 0xffff0000) == 0x002d0000
+ ? 0x002d0001
+- : SUPDRV_IOC_VERSION & 0xffff0000;
++ :*/ SUPDRV_IOC_VERSION & 0xffff0000;
+ CookieReq.u.In.u32MinVersion = uMinVersion;
+ rc = suplibOsIOCtl(&g_supLibData, SUP_IOCTL_COOKIE, &CookieReq, SUP_IOCTL_COOKIE_SIZE);
+ if ( RT_SUCCESS(rc)
+--- a/src/VBox/HostDrivers/Support/SUPDrvIOC.h
++++ b/src/VBox/HostDrivers/Support/SUPDrvIOC.h
+@@ -220,9 +220,10 @@ typedef SUPREQHDR *PSUPREQHDR;
+ * -# When increment the major number, execute all pending work.
+ *
+ * @todo Pending work on next major version change:
+- * - Move SUP_IOCTL_FAST_DO_NOP and SUP_VMMR0_DO_NEM_RUN after NEM.
++ * - Nothing.
++ * @note 0x002f0000 is used by 6.0. The next version number must be 0x00300000.
+ */
+-#define SUPDRV_IOC_VERSION 0x002d0001
++#define SUPDRV_IOC_VERSION 0x002e0000
+
+ /** SUP_IOCTL_COOKIE. */
+ typedef struct SUPCOOKIE
+@@ -314,8 +315,8 @@ typedef struct SUPLDROPEN
+ {
+ struct
+ {
+- /** Size of the image we'll be loading (including tables). */
+- uint32_t cbImageWithTabs;
++ /** Size of the image we'll be loading (including all tables). */
++ uint32_t cbImageWithEverything;
+ /** The size of the image bits. (Less or equal to cbImageWithTabs.) */
+ uint32_t cbImageBits;
+ /** Image name.
+@@ -390,6 +391,29 @@ typedef SUPLDRSYM *PSUPLDRSYM;
+ /** Pointer to a const symbol table entry. */
+ typedef SUPLDRSYM const *PCSUPLDRSYM;
+
++#define SUPLDR_PROT_READ 1 /**< Grant read access (RTMEM_PROT_READ). */
++#define SUPLDR_PROT_WRITE 2 /**< Grant write access (RTMEM_PROT_WRITE). */
++#define SUPLDR_PROT_EXEC 4 /**< Grant execute access (RTMEM_PROT_EXEC). */
++
++/**
++ * A segment table entry - chiefly for conveying memory protection.
++ */
++typedef struct SUPLDRSEG
++{
++ /** The RVA of the segment. */
++ uint32_t off;
++ /** The size of the segment. */
++ uint32_t cb : 28;
++ /** The segment protection (SUPLDR_PROT_XXX). */
++ uint32_t fProt : 3;
++ /** MBZ. */
++ uint32_t fUnused;
++} SUPLDRSEG;
++/** Pointer to a segment table entry. */
++typedef SUPLDRSEG *PSUPLDRSEG;
++/** Pointer to a const segment table entry. */
++typedef SUPLDRSEG const *PCSUPLDRSEG;
++
+ /**
+ * SUPLDRLOAD::u::In::EP type.
+ */
+@@ -443,7 +467,7 @@ typedef struct SUPLDRLOAD
+ /** The size of the image bits (starting at offset 0 and
+ * approaching offSymbols). */
+ uint32_t cbImageBits;
+- /** The offset of the symbol table. */
++ /** The offset of the symbol table (SUPLDRSYM array). */
+ uint32_t offSymbols;
+ /** The number of entries in the symbol table. */
+ uint32_t cSymbols;
+@@ -451,8 +475,12 @@ typedef struct SUPLDRLOAD
+ uint32_t offStrTab;
+ /** Size of the string table. */
+ uint32_t cbStrTab;
++ /** Offset to the segment table (SUPLDRSEG array). */
++ uint32_t offSegments;
++ /** Number of segments. */
++ uint32_t cSegments;
+ /** Size of image data in achImage. */
+- uint32_t cbImageWithTabs;
++ uint32_t cbImageWithEverything;
+ /** The image data. */
+ uint8_t abImage[1];
+ } In;
+--- a/src/VBox/HostDrivers/Support/SUPDrvInternal.h
++++ b/src/VBox/HostDrivers/Support/SUPDrvInternal.h
+@@ -145,6 +145,12 @@
+ # define SUPDRV_USE_MUTEX_FOR_GIP
+ #endif
+
++#if defined(RT_OS_LINUX) /** @todo make everyone do this */
++/** Use the RTR0MemObj API rather than the RTMemExecAlloc for the images.
++ * This is a good idea in general, but a necessity for @bugref{9801}. */
++# define SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
++#endif
++
+
+ /**
+ * OS debug print macro.
+@@ -326,15 +332,20 @@ typedef struct SUPDRVLDRIMAGE
+ struct SUPDRVLDRIMAGE * volatile pNext;
+ /** Pointer to the image. */
+ void *pvImage;
++#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
++ /** The memory object for the module allocation. */
++ RTR0MEMOBJ hMemObjImage;
++#else
+ /** Pointer to the allocated image buffer.
+ * pvImage is 32-byte aligned or it may governed by the native loader (this
+ * member is NULL then). */
+ void *pvImageAlloc;
++#endif
+ /** Magic value (SUPDRVLDRIMAGE_MAGIC). */
+ uint32_t uMagic;
+ /** Size of the image including the tables. This is mainly for verification
+ * of the load request. */
+- uint32_t cbImageWithTabs;
++ uint32_t cbImageWithEverything;
+ /** Size of the image. */
+ uint32_t cbImageBits;
+ /** The number of entries in the symbol table. */
+@@ -345,6 +356,10 @@ typedef struct SUPDRVLDRIMAGE
+ char *pachStrTab;
+ /** Size of the string table. */
+ uint32_t cbStrTab;
++ /** Number of segments. */
++ uint32_t cSegments;
++ /** Segments (for memory protection). */
++ PSUPLDRSEG paSegments;
+ /** Pointer to the optional module initialization callback. */
+ PFNR0MODULEINIT pfnModuleInit;
+ /** Pointer to the optional module termination callback. */
+--- a/src/VBox/HostDrivers/Support/SUPDrv.cpp
++++ b/src/VBox/HostDrivers/Support/SUPDrv.cpp
+@@ -1734,11 +1734,10 @@ static int supdrvIOCtlInnerUnrestricted(
+ /* validate */
+ PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
+ REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
+- REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithTabs > 0);
+- REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithTabs < 16*_1M);
++ REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything > 0);
++ REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything < 16*_1M);
+ REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
+- REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
+- REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithTabs);
++ REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithEverything);
+ REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
+ REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
+ REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
+@@ -1754,19 +1753,29 @@ static int supdrvIOCtlInnerUnrestricted(
+ /* validate */
+ PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
+ REQ_CHECK_EXPR(Name, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
+- REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithTabs), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
+- REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
++ REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithEverything), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
+ REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
+- || ( pReq->u.In.offSymbols < pReq->u.In.cbImageWithTabs
+- && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithTabs),
+- ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithTabs=%#lx\n", (long)pReq->u.In.offSymbols,
+- (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithTabs));
++ || ( pReq->u.In.cSymbols <= 16384
++ && pReq->u.In.offSymbols >= pReq->u.In.cbImageBits
++ && pReq->u.In.offSymbols < pReq->u.In.cbImageWithEverything
++ && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithEverything),
++ ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSymbols,
++ (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithEverything));
+ REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
+- || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithTabs
+- && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithTabs
+- && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithTabs),
+- ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithTabs=%#lx\n", (long)pReq->u.In.offStrTab,
+- (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithTabs));
++ || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithEverything
++ && pReq->u.In.offStrTab >= pReq->u.In.cbImageBits
++ && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything
++ && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything),
++ ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offStrTab,
++ (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithEverything));
++ REQ_CHECK_EXPR_FMT( pReq->u.In.cSegments >= 1
++ && pReq->u.In.cSegments <= 128
++ && pReq->u.In.cSegments <= pReq->u.In.cbImageBits / PAGE_SIZE
++ && pReq->u.In.offSegments >= pReq->u.In.cbImageBits
++ && pReq->u.In.offSegments < pReq->u.In.cbImageWithEverything
++ && pReq->u.In.offSegments + pReq->u.In.cSegments * sizeof(SUPLDRSEG) <= pReq->u.In.cbImageWithEverything,
++ ("SUP_IOCTL_LDR_LOAD: offSegments=%#lx cSegments=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSegments,
++ (long)pReq->u.In.cSegments, (long)pReq->u.In.cbImageWithEverything));
+
+ if (pReq->u.In.cSymbols)
+ {
+@@ -1774,15 +1783,37 @@ static int supdrvIOCtlInnerUnrestricted(
+ PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.abImage[pReq->u.In.offSymbols];
+ for (i = 0; i < pReq->u.In.cSymbols; i++)
+ {
+- REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithTabs,
+- ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithTabs));
++ REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithEverything,
++ ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithEverything));
+ REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
+- ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithTabs));
++ ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
+ REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)&pReq->u.In.abImage[pReq->u.In.offStrTab + paSyms[i].offName],
+ pReq->u.In.cbStrTab - paSyms[i].offName),
+- ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithTabs));
++ ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
+ }
+ }
++ {
++ uint32_t i;
++ uint32_t offPrevEnd = 0;
++ PSUPLDRSEG paSegs = (PSUPLDRSEG)&pReq->u.In.abImage[pReq->u.In.offSegments];
++ for (i = 0; i < pReq->u.In.cSegments; i++)
++ {
++ REQ_CHECK_EXPR_FMT(paSegs[i].off < pReq->u.In.cbImageBits && !(paSegs[i].off & PAGE_OFFSET_MASK),
++ ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)pReq->u.In.cbImageBits));
++ REQ_CHECK_EXPR_FMT(paSegs[i].cb <= pReq->u.In.cbImageBits,
++ ("SUP_IOCTL_LDR_LOAD: seg #%ld: cb %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].cb, (long)pReq->u.In.cbImageBits));
++ REQ_CHECK_EXPR_FMT(paSegs[i].off + paSegs[i].cb <= pReq->u.In.cbImageBits,
++ ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx = %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb, (long)(paSegs[i].off + paSegs[i].cb), (long)pReq->u.In.cbImageBits));
++ REQ_CHECK_EXPR_FMT(paSegs[i].fProt != 0,
++ ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb));
++ REQ_CHECK_EXPR_FMT(paSegs[i].fUnused == 0, ("SUP_IOCTL_LDR_LOAD: seg #%ld: fUnused=1\n", (long)i));
++ REQ_CHECK_EXPR_FMT(offPrevEnd == paSegs[i].off,
++ ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx offPrevEnd %#lx\n", (long)i, (long)paSegs[i].off, (long)offPrevEnd));
++ offPrevEnd = paSegs[i].off + paSegs[i].cb;
++ }
++ REQ_CHECK_EXPR_FMT(offPrevEnd == pReq->u.In.cbImageBits,
++ ("SUP_IOCTL_LDR_LOAD: offPrevEnd %#lx cbImageBits %#lx\n", (long)i, (long)offPrevEnd, (long)pReq->u.In.cbImageBits));
++ }
+
+ /* execute */
+ pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
+@@ -5021,7 +5052,7 @@ static int supdrvIOCtl_LdrOpen(PSUPDRVDE
+ size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
+ SUPDRV_CHECK_SMAP_SETUP();
+ SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
+- LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithTabs=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithTabs));
++ LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithEverything=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithEverything));
+
+ /*
+ * Check if we got an instance of the image already.
+@@ -5035,7 +5066,8 @@ static int supdrvIOCtl_LdrOpen(PSUPDRVDE
+ {
+ if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
+ {
+- /** @todo check cbImageBits and cbImageWithTabs here, if they differs that indicates that the images are different. */
++ /** @todo check cbImageBits and cbImageWithEverything here, if they differs
++ * that indicates that the images are different. */
+ pImage->cUsage++;
+ pReq->u.Out.pvImageBase = pImage->pvImage;
+ pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
+@@ -5078,13 +5110,19 @@ static int supdrvIOCtl_LdrOpen(PSUPDRVDE
+ */
+ pImage = (PSUPDRVLDRIMAGE)pv;
+ pImage->pvImage = NULL;
++#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
++ pImage->hMemObjImage = NIL_RTR0MEMOBJ;
++#else
+ pImage->pvImageAlloc = NULL;
+- pImage->cbImageWithTabs = pReq->u.In.cbImageWithTabs;
++#endif
++ pImage->cbImageWithEverything = pReq->u.In.cbImageWithEverything;
+ pImage->cbImageBits = pReq->u.In.cbImageBits;
+ pImage->cSymbols = 0;
+ pImage->paSymbols = NULL;
+ pImage->pachStrTab = NULL;
+ pImage->cbStrTab = 0;
++ pImage->cSegments = 0;
++ pImage->paSegments = NULL;
+ pImage->pfnModuleInit = NULL;
+ pImage->pfnModuleTerm = NULL;
+ pImage->pfnServiceReqHandler = NULL;
+@@ -5102,10 +5140,19 @@ static int supdrvIOCtl_LdrOpen(PSUPDRVDE
+ rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
+ if (rc == VERR_NOT_SUPPORTED)
+ {
++#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
++ rc = RTR0MemObjAllocPage(&pImage->hMemObjImage, pImage->cbImageBits, true /*fExecutable*/);
++ if (RT_SUCCESS(rc))
++ {
++ pImage->pvImage = RTR0MemObjAddress(pImage->hMemObjImage);
++ pImage->fNative = false;
++ }
++#else
+ pImage->pvImageAlloc = RTMemExecAlloc(pImage->cbImageBits + 31);
+ pImage->pvImage = RT_ALIGN_P(pImage->pvImageAlloc, 32);
+ pImage->fNative = false;
+ rc = pImage->pvImageAlloc ? VINF_SUCCESS : VERR_NO_EXEC_MEMORY;
++#endif
+ SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
+ }
+ if (RT_FAILURE(rc))
+@@ -5138,41 +5185,90 @@ static int supdrvIOCtl_LdrOpen(PSUPDRVDE
+
+
+ /**
++ * Formats a load error message.
++ *
++ * @returns @a rc
++ * @param rc Return code.
++ * @param pReq The request.
++ * @param pszFormat The error message format string.
++ * @param ... Arguments to the format string.
++ */
++int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
++{
++ va_list va;
++ va_start(va, pszFormat);
++ pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
++ RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
++ va_end(va);
++ Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
++ return rc;
++}
++
++
++/**
+ * Worker that validates a pointer to an image entrypoint.
+ *
++ * Calls supdrvLdrLoadError on error.
++ *
+ * @returns IPRT status code.
+ * @param pDevExt The device globals.
+ * @param pImage The loader image.
+ * @param pv The pointer into the image.
+ * @param fMayBeNull Whether it may be NULL.
+- * @param fCheckNative Whether to check with the native loaders.
+- * @param pszSymbol The entrypoint name or log name. If the symbol
++ * @param pszSymbol The entrypoint name or log name. If the symbol is
+ * capitalized it signifies a specific symbol, otherwise it
+ * for logging.
+ * @param pbImageBits The image bits prepared by ring-3.
++ * @param pReq The request for passing to supdrvLdrLoadError.
+ *
+- * @remarks Will leave the lock on failure.
++ * @note Will leave the loader lock on failure!
+ */
+ static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv, bool fMayBeNull,
+- bool fCheckNative, const uint8_t *pbImageBits, const char *pszSymbol)
++ const uint8_t *pbImageBits, const char *pszSymbol, PSUPLDRLOAD pReq)
+ {
+ if (!fMayBeNull || pv)
+ {
+- if ((uintptr_t)pv - (uintptr_t)pImage->pvImage >= pImage->cbImageBits)
++ uint32_t iSeg;
++
++ /* Must be within the image bits: */
++ uintptr_t const uRva = (uintptr_t)pv - (uintptr_t)pImage->pvImage;
++ if (uRva >= pImage->cbImageBits)
+ {
+ supdrvLdrUnlock(pDevExt);
+- Log(("Out of range (%p LB %#x): %s=%p\n", pImage->pvImage, pImage->cbImageBits, pszSymbol, pv));
+- return VERR_INVALID_PARAMETER;
++ return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
++ "Invalid entry point address %p given for %s: RVA %#zx, image size %#zx",
++ pv, pszSymbol, uRva, pImage->cbImageBits);
+ }
+
+- if (pImage->fNative && fCheckNative)
++ /* Must be in an executable segment: */
++ for (iSeg = 0; iSeg < pImage->cSegments; iSeg++)
++ if (uRva - pImage->paSegments[iSeg].off < (uintptr_t)pImage->paSegments[iSeg].cb)
++ {
++ if (pImage->paSegments[iSeg].fProt & SUPLDR_PROT_EXEC)
++ break;
++ supdrvLdrUnlock(pDevExt);
++ return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
++ "Bad entry point %p given for %s: not executable (seg #%u: %#RX32 LB %#RX32 prot %#x)",
++ pv, pszSymbol, iSeg, pImage->paSegments[iSeg].off, pImage->paSegments[iSeg].cb,
++ pImage->paSegments[iSeg].fProt);
++ }
++ if (iSeg >= pImage->cSegments)
+ {
++ supdrvLdrUnlock(pDevExt);
++ return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
++ "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
++ pv, pszSymbol, uRva);
++ }
++
++ if (pImage->fNative)
++ {
++ /** @todo pass pReq along to the native code. */
+ int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits, pszSymbol);
+ if (RT_FAILURE(rc))
+ {
+ supdrvLdrUnlock(pDevExt);
+- Log(("Bad entry point address: %s=%p (rc=%Rrc)\n", pszSymbol, pv, rc));
+- return rc;
++ return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
++ "Bad entry point address %p for %s: rc=%Rrc\n", pv, pszSymbol, rc);
+ }
+ }
+ }
+@@ -5223,27 +5319,6 @@ int VBOXCALL supdrvLdrLoadError(int rc,
+
+
+ /**
+- * Formats a load error message.
+- *
+- * @returns @a rc
+- * @param rc Return code.
+- * @param pReq The request.
+- * @param pszFormat The error message format string.
+- * @param ... Argument to the format string.
+- */
+-int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
+-{
+- va_list va;
+- va_start(va, pszFormat);
+- pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
+- RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
+- va_end(va);
+- Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
+- return rc;
+-}
+-
+-
+-/**
+ * Loads the image bits.
+ *
+ * This is the 2nd step of the loading.
+@@ -5259,7 +5334,7 @@ static int supdrvIOCtl_LdrLoad(PSUPDRVDE
+ PSUPDRVLDRIMAGE pImage;
+ int rc;
+ SUPDRV_CHECK_SMAP_SETUP();
+- LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithBits=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithTabs));
++ LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithEverything=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithEverything));
+ SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
+
+ /*
+@@ -5281,12 +5356,12 @@ static int supdrvIOCtl_LdrLoad(PSUPDRVDE
+ /*
+ * Validate input.
+ */
+- if ( pImage->cbImageWithTabs != pReq->u.In.cbImageWithTabs
+- || pImage->cbImageBits != pReq->u.In.cbImageBits)
++ if ( pImage->cbImageWithEverything != pReq->u.In.cbImageWithEverything
++ || pImage->cbImageBits != pReq->u.In.cbImageBits)
+ {
+ supdrvLdrUnlock(pDevExt);
+- return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %d(prep) != %d(load) or %d != %d",
+- pImage->cbImageWithTabs, pReq->u.In.cbImageWithTabs, pImage->cbImageBits, pReq->u.In.cbImageBits);
++ return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %u(prep) != %u(load) or %u != %u",
++ pImage->cbImageWithEverything, pReq->u.In.cbImageWithEverything, pImage->cbImageBits, pReq->u.In.cbImageBits);
+ }
+
+ if (pImage->uState != SUP_IOCTL_LDR_OPEN)
+@@ -5306,35 +5381,56 @@ static int supdrvIOCtl_LdrLoad(PSUPDRVDE
+ return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
+ }
+
++ /*
++ * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
++ */
++ pImage->cSegments = pReq->u.In.cSegments;
++ {
++ size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
++ pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
++ if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
++ pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
++ else
++ {
++ supdrvLdrUnlock(pDevExt);
++ return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
++ }
++ SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
++ }
++
++ /*
++ * Validate entrypoints.
++ */
+ switch (pReq->u.In.eEPType)
+ {
+ case SUPLDRLOADEP_NOTHING:
+ break;
+
+ case SUPLDRLOADEP_VMMR0:
+- rc = supdrvLdrValidatePointer( pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0, false, false, pReq->u.In.abImage, "pvVMMR0");
+- if (RT_SUCCESS(rc))
+- rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, true, pReq->u.In.abImage, "VMMR0EntryFast");
+- if (RT_SUCCESS(rc))
+- rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, true, pReq->u.In.abImage, "VMMR0EntryEx");
++ if (pReq->u.In.EP.VMMR0.pvVMMR0 != pImage->pvImage)
++ {
++ supdrvLdrUnlock(pDevExt);
++ return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid pvVMMR0 pointer: %p, expected %p", pReq->u.In.EP.VMMR0.pvVMMR0, pImage->pvImage);
++ }
++ rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.abImage, "VMMR0EntryFast", pReq);
++ if (RT_FAILURE(rc))
++ return rc;
++ rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.abImage, "VMMR0EntryEx", pReq);
+ if (RT_FAILURE(rc))
+- return supdrvLdrLoadError(rc, pReq, "Invalid VMMR0 pointer");
++ return rc;
+ break;
+
+ case SUPLDRLOADEP_SERVICE:
+- rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, true, pReq->u.In.abImage, "pfnServiceReq");
++ rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.abImage, "pfnServiceReq", pReq);
+ if (RT_FAILURE(rc))
+- return supdrvLdrLoadError(rc, pReq, "Invalid pfnServiceReq pointer: %p", pReq->u.In.EP.Service.pfnServiceReq);
++ return rc;
+ if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
+ || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
+ || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
+ {
+ supdrvLdrUnlock(pDevExt);
+- return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
+- "Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!",
+- pImage->pvImage, pReq->u.In.cbImageWithTabs,
+- pReq->u.In.EP.Service.apvReserved[0],
+- pReq->u.In.EP.Service.apvReserved[1],
++ return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "apvReserved={%p,%p,%p} MBZ!",
++ pReq->u.In.EP.Service.apvReserved[0], pReq->u.In.EP.Service.apvReserved[1],
+ pReq->u.In.EP.Service.apvReserved[2]);
+ }
+ break;
+@@ -5344,12 +5440,12 @@ static int supdrvIOCtl_LdrLoad(PSUPDRVDE
+ return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
+ }
+
+- rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, true, pReq->u.In.abImage, "ModuleInit");
++ rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.abImage, "ModuleInit", pReq);
+ if (RT_FAILURE(rc))
+- return supdrvLdrLoadError(rc, pReq, "Invalid pfnModuleInit pointer: %p", pReq->u.In.pfnModuleInit);
+- rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, true, pReq->u.In.abImage, "ModuleTerm");
++ return rc;
++ rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.abImage, "ModuleTerm", pReq);
+ if (RT_FAILURE(rc))
+- return supdrvLdrLoadError(rc, pReq, "Invalid pfnModuleTerm pointer: %p", pReq->u.In.pfnModuleTerm);
++ return rc;
+ SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
+
+ /*
+@@ -5361,10 +5457,8 @@ static int supdrvIOCtl_LdrLoad(PSUPDRVDE
+ pImage->cbStrTab = pReq->u.In.cbStrTab;
+ if (pImage->cbStrTab)
+ {
+- pImage->pachStrTab = (char *)RTMemAlloc(pImage->cbStrTab);
+- if (pImage->pachStrTab)
+- memcpy(pImage->pachStrTab, &pReq->u.In.abImage[pReq->u.In.offStrTab], pImage->cbStrTab);
+- else
++ pImage->pachStrTab = (char *)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offStrTab], pImage->cbStrTab);
++ if (!pImage->pachStrTab)
+ rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
+ SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
+ }
+@@ -5373,17 +5467,15 @@ static int supdrvIOCtl_LdrLoad(PSUPDRVDE
+ if (RT_SUCCESS(rc) && pImage->cSymbols)
+ {
+ size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
+- pImage->paSymbols = (PSUPLDRSYM)RTMemAlloc(cbSymbols);
+- if (pImage->paSymbols)
+- memcpy(pImage->paSymbols, &pReq->u.In.abImage[pReq->u.In.offSymbols], cbSymbols);
+- else
++ pImage->paSymbols = (PSUPLDRSYM)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSymbols], cbSymbols);
++ if (!pImage->paSymbols)
+ rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
+ SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
+ }
+ }
+
+ /*
+- * Copy the bits / complete native loading.
++ * Copy the bits and apply permissions / complete native loading.
+ */
+ if (RT_SUCCESS(rc))
+ {
+@@ -5395,7 +5487,26 @@ static int supdrvIOCtl_LdrLoad(PSUPDRVDE
+ rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
+ else
+ {
++#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
++ uint32_t i;
+ memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
++
++ for (i = 0; i < pImage->cSegments; i++)
++ {
++ rc = RTR0MemObjProtect(pImage->hMemObjImage, pImage->paSegments[i].off, pImage->paSegments[i].cb,
++ pImage->paSegments[i].fProt);
++ if (RT_SUCCESS(rc))
++ continue;
++ if (rc == VERR_NOT_SUPPORTED)
++ rc = VINF_SUCCESS;
++ else
++ rc = supdrvLdrLoadError(rc, pReq, "RTR0MemObjProtect failed on seg#%u %#RX32 LB %#RX32 fProt=%#x",
++ i, pImage->paSegments[i].off, pImage->paSegments[i].cb, pImage->paSegments[i].fProt);
++ break;
++ }
++#else
++ memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
++#endif
+ Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
+ }
+ SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
+@@ -5990,12 +6101,20 @@ static void supdrvLdrFree(PSUPDRVDEVEXT
+ pImage->pDevExt = NULL;
+ pImage->pNext = NULL;
+ pImage->uState = SUP_IOCTL_LDR_FREE;
++#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
++ RTR0MemObjFree(pImage->hMemObjImage, true /*fMappings*/);
++ pImage->hMemObjImage = NIL_RTR0MEMOBJ;
++#else
+ RTMemExecFree(pImage->pvImageAlloc, pImage->cbImageBits + 31);
+ pImage->pvImageAlloc = NULL;
++#endif
++ pImage->pvImage = NULL;
+ RTMemFree(pImage->pachStrTab);
+ pImage->pachStrTab = NULL;
+ RTMemFree(pImage->paSymbols);
+ pImage->paSymbols = NULL;
++ RTMemFree(pImage->paSegments);
++ pImage->paSegments = NULL;
+ RTMemFree(pImage);
+ }
+
+--- a/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h
++++ b/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h
+@@ -176,6 +176,11 @@
+ # include <asm/set_memory.h>
+ #endif
+
++/* for __flush_tlb_all() */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
++# include <asm/tlbflush.h>
++#endif
++
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ # include <asm/smap.h>
+ #else
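
The SUPDrvInternal.h and SUPDrv.cpp hunks above replace the old RTMemExecAlloc image buffer with an RTR0MemObj allocation whose pages are re-protected per segment (SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE, @bugref{9801}), driven by the new SUPLDRSEG table supplied by ring-3. Below is a minimal sketch of that allocate/copy/protect flow, assuming the same IPRT ring-0 APIs the patch calls; the local names hMemObj, pbImageBits, cbImageBits, cSegments and paSegments are illustrative stand-ins for the SUPDRVLDRIMAGE and SUPLDRLOAD fields, not code from the patch.

    /* Sketch only: follows the SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE path, not a drop-in. */
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    int rc = RTR0MemObjAllocPage(&hMemObj, cbImageBits, true /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void *pvImage = RTR0MemObjAddress(hMemObj);
        memcpy(pvImage, pbImageBits, cbImageBits);          /* copy the image bits first */
        for (uint32_t i = 0; i < cSegments; i++)            /* then tighten protections per segment */
        {
            rc = RTR0MemObjProtect(hMemObj, paSegments[i].off, paSegments[i].cb, paSegments[i].fProt);
            if (RT_SUCCESS(rc))
                continue;
            if (rc == VERR_NOT_SUPPORTED)                   /* hosts that cannot reprotect are tolerated */
                rc = VINF_SUCCESS;
            break;
        }
        if (RT_FAILURE(rc))
            RTR0MemObjFree(hMemObj, true /*fMappings*/);    /* cleanup as done in supdrvLdrFree */
    }

Keeping only the SUPLDR_PROT_EXEC segments executable is what gives the loaded ring-0 image W^X semantics; the VERR_NOT_SUPPORTED fallback keeps hosts working where RTR0MemObjProtect cannot change kernel mappings.
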
diff --git a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/kernel-5.8-4.patch b/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/kernel-5.8-4.patch
deleted file mode 100644
index cb4148fc79..0000000000
--- a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers/kernel-5.8-4.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-Description: Fix kernel 5.8 forbidding use of vermagic.h header file
-Author: Gianfranco Costamagna <locutusofborg@debian.org>
-Origin: https://www.virtualbox.org/ticket/19644
-Bug-Ubuntu: https://launchpad.net/bugs/1884652
-Last-Update: 2020-08-10
-
---- virtualbox-6.1.12-dfsg.orig/src/VBox/Additions/linux/sharedfolders/vfsmod.c
-+++ virtualbox-6.1.12-dfsg/src/VBox/Additions/linux/sharedfolders/vfsmod.c
-@@ -53,7 +53,9 @@
- #include <linux/seq_file.h>
- #include <linux/vfs.h>
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 62)
--# include <linux/vermagic.h>
-+# if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-+# include <linux/vermagic.h>
-+# endif
- #endif
- #include <VBox/err.h>
- #include <iprt/path.h>
diff --git a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers_6.1.12.bb b/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers_6.1.12.bb
index e57df58d6c..6c036d403c 100644
--- a/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers_6.1.12.bb
+++ b/meta-oe/recipes-support/vboxguestdrivers/vboxguestdrivers_6.1.12.bb
@@ -12,10 +12,7 @@ COMPATIBLE_MACHINE = "(qemux86|qemux86-64)"
VBOX_NAME = "VirtualBox-${PV}"
SRC_URI = "http://download.virtualbox.org/virtualbox/${PV}/${VBOX_NAME}.tar.bz2 \
- file://0001-fixes_for_mm_struct.patch \
- file://0002-fixes_for_module_memory.patch \
- file://0003-fixes_for_changes_in_cpu_tlbstate.patch \
- file://kernel-5.8-4.patch \
+ file://021-linux-5-8.patch \
file://Makefile.utils \
"
SRC_URI[md5sum] = "3c351f7fd6376e0bb3c8489505a9450c"