From 2a6e3cf63f58e289802a11faad5fb495e2d04e97 Mon Sep 17 00:00:00 2001
From: vboxsync
Date: Wed, 9 Dec 2020 18:59:04 +0000
Subject: [PATCH] Runtime/memobj-r0drv-linux.c: Changes to support the upcoming 5.10 kernel, bugref:9879

Upstream-Status: Backport

git-svn-id: http://www.virtualbox.org/svn/vbox@87074 cfe28804-0f27-0410-a406-dd0f0b0b656f
Signed-off-by: Bruce Ashfield
---
 .../Runtime/r0drv/linux/memobj-r0drv-linux.c | 68 ++++++++++++++++++-
 1 file changed, 67 insertions(+), 1 deletion(-)

--- a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+++ b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
@@ -56,9 +56,19 @@
  * Whether we use alloc_vm_area (3.2+) for executable memory.
  * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
  * better W^R compliance (fExecutable flag). */
-#if RTLNX_VER_MIN(3,2,0) || defined(DOXYGEN_RUNNING)
+#if RTLNX_VER_RANGE(3,2,0, 5,10,0) || defined(DOXYGEN_RUNNING)
 # define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
 #endif
+/** @def IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
+ * alloc_vm_area was removed with 5.10 so we have to resort to a different way
+ * to allocate executable memory.
+ * It would be possible to remove IPRT_USE_ALLOC_VM_AREA_FOR_EXEC and use
+ * this path exclusively for 3.2+ but no time to test it really works on every
+ * supported kernel, so better play safe for now.
+ */
+#if RTLNX_VER_MIN(5,10,0) || defined(DOXYGEN_RUNNING)
+# define IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
+#endif
 
 /*
  * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
@@ -502,6 +512,46 @@ static void rtR0MemObjLinuxFreePages(PRT
 }
 
 
+#ifdef IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
+/**
+ * User data passed to the apply_to_page_range() callback.
+ */
+typedef struct LNXAPPLYPGRANGE
+{
+    /** Pointer to the memory object. */
+    PRTR0MEMOBJLNX pMemLnx;
+    /** The page protection flags to apply. */
+    pgprot_t fPg;
+} LNXAPPLYPGRANGE;
+/** Pointer to the user data. */
+typedef LNXAPPLYPGRANGE *PLNXAPPLYPGRANGE;
+/** Pointer to the const user data. */
+typedef const LNXAPPLYPGRANGE *PCLNXAPPLYPGRANGE;
+
+
+/**
+ * Callback called in apply_to_page_range().
+ *
+ * @returns Linux status code.
+ * @param   pPte    Pointer to the page table entry for the given address.
+ * @param   uAddr   The address to apply the new protection to.
+ * @param   pvUser  The opaque user data.
+ */
+#ifdef __i386__
+static int rtR0MemObjLinuxApplyPageRange(pte_t *pPte, unsigned long uAddr, void *pvUser)
+#else
+static DECLCALLBACK(int) rtR0MemObjLinuxApplyPageRange(pte_t *pPte, unsigned long uAddr, void *pvUser)
+#endif
+{
+    PCLNXAPPLYPGRANGE pArgs = (PCLNXAPPLYPGRANGE)pvUser;
+    PRTR0MEMOBJLNX pMemLnx = pArgs->pMemLnx;
+    size_t idxPg = (uAddr - (unsigned long)pMemLnx->Core.pv) >> PAGE_SHIFT;
+
+    set_pte(pPte, mk_pte(pMemLnx->apPages[idxPg], pArgs->fPg));
+    return 0;
+}
+#endif
+
+
 /**
  * Maps the allocation into ring-0.
  *
@@ -584,6 +634,11 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMO
         else
 # endif
         {
+# if defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
+            if (fExecutable)
+                pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. */
+# endif
+
 # ifdef VM_MAP
             pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
 # else
@@ -1851,6 +1906,21 @@ DECLHIDDEN(int) rtR0MemObjNativeProtect(
         preempt_enable();
         return VINF_SUCCESS;
     }
+# elif defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
+    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
+    if (   pMemLnx->fExecutable
+        && pMemLnx->fMappedToRing0)
+    {
+        LNXAPPLYPGRANGE Args;
+        Args.pMemLnx = pMemLnx;
+        Args.fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
+        int rcLnx = apply_to_page_range(current->active_mm, (unsigned long)pMemLnx->Core.pv + offSub, cbSub,
+                                        rtR0MemObjLinuxApplyPageRange, (void *)&Args);
+        if (rcLnx)
+            return VERR_NOT_SUPPORTED;
+
+        return VINF_SUCCESS;
+    }
 # endif
 
     NOREF(pMem);
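
The W^X flow these hunks implement can be sketched in isolation as follows. This is a minimal, illustrative stand-in, not the IPRT code: it assumes an x86 kernel >= 5.10 (the three-argument pte_fn_t callback), and the mymod_* names are hypothetical.

/* Illustrative sketch only (not part of the backported change): make a
 * vmap'd, initially non-executable range executable with
 * apply_to_page_range(), the same pattern rtR0MemObjNativeProtect() uses
 * above.  Assumes x86, kernel >= 5.10. */
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>

struct mymod_apply_args {
    struct page   **papPages;   /* backing pages of the vmap'd region */
    unsigned long   uStart;     /* start address of the mapping */
    pgprot_t        fPg;        /* protection to apply per PTE */
};

/* pte_fn_t callback: rewrite one PTE with the requested protection. */
static int mymod_apply_one(pte_t *pPte, unsigned long uAddr, void *pvUser)
{
    struct mymod_apply_args *pArgs = pvUser;
    size_t idxPg = (uAddr - pArgs->uStart) >> PAGE_SHIFT;

    set_pte(pPte, mk_pte(pArgs->papPages[idxPg], pArgs->fPg));
    return 0;
}

/* Flip a mapping created with vmap(..., PAGE_KERNEL) to PAGE_KERNEL_EXEC. */
static int mymod_make_exec(void *pv, size_t cb, struct page **papPages)
{
    struct mymod_apply_args Args = {
        .papPages = papPages,
        .uStart   = (unsigned long)pv,
        .fPg      = PAGE_KERNEL_EXEC,
    };
    int rc = apply_to_page_range(current->active_mm, (unsigned long)pv, cb,
                                 mymod_apply_one, &Args);
    if (!rc)
        flush_tlb_kernel_range((unsigned long)pv, (unsigned long)pv + cb);
    return rc;
}

The patch itself works the same way but in two steps: rtR0MemObjLinuxVMap() maps the pages with _PAGE_NX set, and rtR0MemObjNativeProtect() only clears NX once the memory contents are in place, so the range is never writable and executable at the same time.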