Diffstat (limited to 'recipes/xorg-lib/pixman-0.12.0')
-rw-r--r--  recipes/xorg-lib/pixman-0.12.0/pixman-arm.patch        632
-rw-r--r--  recipes/xorg-lib/pixman-0.12.0/pixman-x888-565.patch    68
2 files changed, 700 insertions(+), 0 deletions(-)
diff --git a/recipes/xorg-lib/pixman-0.12.0/pixman-arm.patch b/recipes/xorg-lib/pixman-0.12.0/pixman-arm.patch
new file mode 100644
index 0000000000..91dda03b7c
--- /dev/null
+++ b/recipes/xorg-lib/pixman-0.12.0/pixman-arm.patch
@@ -0,0 +1,632 @@
+From: Jeff Muizelaar <jmuizelaar@mozilla.com>
+Date: Wed, 17 Sep 2008 19:53:20 +0000 (-0400)
+Subject: Add support for ARMv6 SIMD fastpaths.
+X-Git-Url: http://gitweb.freedesktop.org/?p=pixman.git;a=commitdiff;h=d0b181f347ef4720d130beee3f03196afbd28aba
+
+Add support for ARMv6 SIMD fastpaths.
+---
+
+--- a/configure.ac
++++ b/configure.ac
+@@ -277,6 +277,44 @@ AC_SUBST(VMX_CFLAGS)
+
+ AM_CONDITIONAL(USE_VMX, test $have_vmx_intrinsics = yes)
+
++dnl Check for ARM
++
++have_armv6_simd=no
++AC_MSG_CHECKING(whether to use ARM assembler)
++xserver_save_CFLAGS=$CFLAGS
++CFLAGS="$CFLAGS $ARM_CFLAGS"
++AC_COMPILE_IFELSE([
++int main () {
++ asm("uqadd8 r1, r1, r2");
++ return 0;
++}], have_armv6_simd=yes)
++CFLAGS=$xserver_save_CFLAGS
++
++AC_ARG_ENABLE(arm,
++ [AC_HELP_STRING([--disable-arm],
++ [disable ARM fast paths])],
++ [enable_arm=$enableval], [enable_arm=auto])
++
++if test $enable_arm = no ; then
++ have_armv6_simd=disabled
++fi
++
++if test $have_armv6_simd = yes ; then
++ AC_DEFINE(USE_ARM, 1, [use ARM compiler intrinsics])
++else
++ ARM_CFLAGS=
++fi
++
++AC_MSG_RESULT($have_armv6_simd)
++if test $enable_arm = yes && test $have_armv6_simd = no ; then
++ AC_MSG_ERROR([ARM intrinsics not detected])
++fi
++
++AC_SUBST(ARM_CFLAGS)
++
++AM_CONDITIONAL(USE_ARM, test $have_armv6_simd = yes)
++
++
+ AC_ARG_ENABLE(gtk,
+ [AC_HELP_STRING([--enable-gtk],
+ [enable tests using GTK+ [default=auto]])],
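The configure check above simply tries to compile one uqadd8 instruction; if the compiler and assembler accept it, the ARMv6 SIMD fast paths are built, with --disable-arm available to override the autodetection. The probe can be reproduced outside autoconf with a minimal sketch like this (file name and compiler flags are illustrative assumptions, not part of the patch):

/* armv6-probe.c — standalone version of the AC_COMPILE_IFELSE body.
 * Build test, e.g.: gcc -march=armv6 -c armv6-probe.c
 * On a pre-ARMv6 target the assembler rejects uqadd8 and the compile
 * fails, which is exactly what the configure check keys on. */
int main (void)
{
    asm ("uqadd8 r1, r1, r2");  /* ARMv6 SIMD: saturating byte-wise add */
    return 0;
}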
+--- a/pixman/Makefile.am
++++ b/pixman/Makefile.am
+@@ -79,3 +79,15 @@ libpixman_sse2_la_LIBADD = $(DEP_LIBS)
+ libpixman_1_la_LIBADD += libpixman-sse2.la
+ endif
+
++# arm code
++if USE_ARM
++noinst_LTLIBRARIES += libpixman-arm.la
++libpixman_arm_la_SOURCES = \
++ pixman-arm.c \
++ pixman-arm.h
++libpixman_arm_la_CFLAGS = $(DEP_CFLAGS) $(ARM_CFLAGS)
++libpixman_arm_la_LIBADD = $(DEP_LIBS)
++libpixman_1_la_LIBADD += libpixman-arm.la
++endif
++
++
+--- /dev/null
++++ b/pixman/pixman-arm.c
+@@ -0,0 +1,409 @@
++/*
++ * Copyright © 2008 Mozilla Corporation
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that
++ * copyright notice and this permission notice appear in supporting
++ * documentation, and that the name of Mozilla Corporation not be used in
++ * advertising or publicity pertaining to distribution of the software without
++ * specific, written prior permission. Mozilla Corporation makes no
++ * representations about the suitability of this software for any purpose. It
++ * is provided "as is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
++ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
++ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
++ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
++ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
++ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
++ * SOFTWARE.
++ *
++ * Author: Jeff Muizelaar (jeff@infidigm.net)
++ *
++ */
++#ifdef HAVE_CONFIG_H
++#include <config.h>
++#endif
++
++#include "pixman-arm.h"
++
++void
++fbCompositeSrcAdd_8000x8000arm (pixman_op_t op,
++ pixman_image_t * pSrc,
++ pixman_image_t * pMask,
++ pixman_image_t * pDst,
++ int16_t xSrc,
++ int16_t ySrc,
++ int16_t xMask,
++ int16_t yMask,
++ int16_t xDst,
++ int16_t yDst,
++ uint16_t width,
++ uint16_t height)
++{
++ uint8_t *dstLine, *dst;
++ uint8_t *srcLine, *src;
++ int dstStride, srcStride;
++ uint16_t w;
++ uint8_t s, d;
++
++ fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
++ fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
++
++ while (height--)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++ src = srcLine;
++ srcLine += srcStride;
++ w = width;
++
++ while (w && (unsigned long)dst & 3)
++ {
++ s = *src;
++ d = *dst;
++ asm("uqadd8 %0, %1, %2" : "+r"(d) : "r"(s));
++ *dst = d;
++
++ dst++;
++ src++;
++ w--;
++ }
++
++ while (w >= 4)
++ {
++ asm("uqadd8 %0, %1, %2" : "=r"(*(uint32_t*)dst) : "r"(*(uint32_t*)src), "r"(*(uint32_t*)dst));
++ dst += 4;
++ src += 4;
++ w -= 4;
++ }
++
++ while (w)
++ {
++ s = *src;
++ d = *dst;
++ asm("uqadd8 %0, %1, %2" : "+r"(d) : "r"(s));
++ *dst = d;
++
++ dst++;
++ src++;
++ w--;
++ }
++ }
++
++}
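The function above processes leading pixels one at a time until dst is word-aligned, then handles four a8 pixels per uqadd8. The instruction performs four independent byte-wise unsigned additions that clamp at 0xff rather than wrapping; a plain-C model of its behaviour (a sketch for illustration, not part of the patch) is:

#include <stdint.h>

/* Model of one uqadd8: four byte lanes, each saturating at 255. */
static uint32_t
uqadd8_model (uint32_t a, uint32_t b)
{
    uint32_t r = 0;
    int i;

    for (i = 0; i < 4; i++)
    {
        uint32_t lane = ((a >> (i * 8)) & 0xff) + ((b >> (i * 8)) & 0xff);
        if (lane > 0xff)
            lane = 0xff;        /* saturate instead of wrapping */
        r |= lane << (i * 8);
    }
    return r;
}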
++
++void
++fbCompositeSrc_8888x8888arm (pixman_op_t op,
++ pixman_image_t * pSrc,
++ pixman_image_t * pMask,
++ pixman_image_t * pDst,
++ int16_t xSrc,
++ int16_t ySrc,
++ int16_t xMask,
++ int16_t yMask,
++ int16_t xDst,
++ int16_t yDst,
++ uint16_t width,
++ uint16_t height)
++{
++ uint32_t *dstLine, *dst;
++ uint32_t *srcLine, *src;
++ int dstStride, srcStride;
++ uint16_t w;
++ uint32_t component_half = 0x800080;
++ uint32_t upper_component_mask = 0xff00ff00;
++ uint32_t alpha_mask = 0xff;
++
++ fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
++ fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
++
++ while (height--)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++ src = srcLine;
++ srcLine += srcStride;
++ w = width;
++
++//#define inner_branch
++ asm volatile (
++ "cmp %[w], #0\n\t"
++ "beq 2f\n\t"
++ "1:\n\t"
++ /* load src */
++ "ldr r5, [%[src]], #4\n\t"
++#ifdef inner_branch
++ /* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
++ * The 0x0 case also allows us to avoid doing an unnecessary data
++ * write, which is more valuable, so we only check for that. */
++ "cmp r5, #0x1000000\n\t"
++ "blt 3f\n\t"
++
++ /* = 255 - alpha */
++ "sub r8, %[alpha_mask], r5, lsr #24\n\t"
++
++ "ldr r4, [%[dest]] \n\t"
++
++#else
++ "ldr r4, [%[dest]] \n\t"
++
++ /* = 255 - alpha */
++ "sub r8, %[alpha_mask], r5, lsr #24\n\t"
++#endif
++ "uxtb16 r6, r4\n\t"
++ "uxtb16 r7, r4, ror #8\n\t"
++
++ /* multiply by 257 and divide by 65536 */
++ "mla r6, r6, r8, %[component_half]\n\t"
++ "mla r7, r7, r8, %[component_half]\n\t"
++
++ "uxtab16 r6, r6, r6, ror #8\n\t"
++ "uxtab16 r7, r7, r7, ror #8\n\t"
++
++ /* recombine the 0xff00ff00 bytes of r6 and r7 */
++ "and r7, %[upper_component_mask]\n\t"
++ "uxtab16 r6, r7, r6, ror #8\n\t"
++
++ "uqadd8 r5, r6, r5\n\t"
++
++#ifdef inner_branch
++ "3:\n\t"
++
++#endif
++ "str r5, [%[dest]], #4\n\t"
++ /* decrement loop counter and branch back to the top */
++ "subs %[w], %[w], #1\n\t"
++ "bne 1b\n\t"
++ "2:\n\t"
++ : [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src)
++ : [component_half] "r" (component_half), [upper_component_mask] "r" (upper_component_mask),
++ [alpha_mask] "r" (alpha_mask)
++ : "r4", "r5", "r6", "r7", "r8", "cc", "memory"
++ );
++ }
++}
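The mla/uxtab16 pairs above implement the standard exact division by 255: for t = x * (255 - alpha) + 0x80, adding the high byte of t to t and then taking the high byte of the sum gives x * (255 - alpha) / 255 correctly rounded — that is what the "multiply by 257 and divide by 65536" comment refers to — and uxtb16/uxtab16 let the same arithmetic run on two packed colour components per register. The per-component arithmetic, as a scalar C sketch (helper name is an assumption, not from the patch):

#include <stdint.h>

/* Exact (x * a) / 255 via the +0x80, add-high-byte,
 * take-high-byte trick used by the assembly above. */
static uint8_t
mul_div_255 (uint8_t x, uint8_t a)
{
    uint32_t t = (uint32_t) x * a + 0x80;
    return (uint8_t) ((t + (t >> 8)) >> 8);
}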
++
++void
++fbCompositeSrc_8888x8x8888arm (pixman_op_t op,
++ pixman_image_t * pSrc,
++ pixman_image_t * pMask,
++ pixman_image_t * pDst,
++ int16_t xSrc,
++ int16_t ySrc,
++ int16_t xMask,
++ int16_t yMask,
++ int16_t xDst,
++ int16_t yDst,
++ uint16_t width,
++ uint16_t height)
++{
++ uint32_t *dstLine, *dst;
++ uint32_t *srcLine, *src;
++ uint32_t mask;
++ int dstStride, srcStride;
++ uint16_t w;
++ uint32_t component_half = 0x800080;
++ uint32_t alpha_mask = 0xff;
++
++ fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
++ fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
++
++ fbComposeGetSolid (pMask, mask, pDst->bits.format);
++ mask = (mask) >> 24;
++
++ while (height--)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++ src = srcLine;
++ srcLine += srcStride;
++ w = width;
++
++//#define inner_branch
++ asm volatile (
++ "cmp %[w], #0\n\t"
++ "beq 2f\n\t"
++ "1:\n\t"
++ /* load src */
++ "ldr r5, [%[src]], #4\n\t"
++#ifdef inner_branch
++ /* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
++ * The 0x0 case also allows us to avoid doing an unnecessary data
++ * write, which is more valuable, so we only check for that. */
++ "cmp r5, #0x1000000\n\t"
++ "blt 3f\n\t"
++
++#endif
++ "ldr r4, [%[dest]] \n\t"
++
++ "uxtb16 r6, r5\n\t"
++ "uxtb16 r7, r5, ror #8\n\t"
++
++ /* multiply by the mask alpha then by 257 and divide by 65536 */
++ "mla r6, r6, %[mask_alpha], %[component_half]\n\t"
++ "mla r7, r7, %[mask_alpha], %[component_half]\n\t"
++
++ "uxtab16 r6, r6, r6, ror #8\n\t"
++ "uxtab16 r7, r7, r7, ror #8\n\t"
++
++ "uxtb16 r6, r6, ror #8\n\t"
++ "uxtb16 r7, r7, ror #8\n\t"
++
++ /* recombine */
++ "orr r5, r6, r7, lsl #8\n\t"
++
++ "uxtb16 r6, r4\n\t"
++ "uxtb16 r7, r4, ror #8\n\t"
++
++ /* 255 - alpha */
++ "sub r8, %[alpha_mask], r5, lsr #24\n\t"
++
++ /* multiply by alpha (r8) then by 257 and divide by 65536 */
++ "mla r6, r6, r8, %[component_half]\n\t"
++ "mla r7, r7, r8, %[component_half]\n\t"
++
++ "uxtab16 r6, r6, r6, ror #8\n\t"
++ "uxtab16 r7, r7, r7, ror #8\n\t"
++
++ "uxtb16 r6, r6, ror #8\n\t"
++ "uxtb16 r7, r7, ror #8\n\t"
++
++ /* recombine */
++ "orr r6, r6, r7, lsl #8\n\t"
++
++ "uqadd8 r5, r6, r5\n\t"
++
++#ifdef inner_branch
++ "3:\n\t"
++
++#endif
++ "str r5, [%[dest]], #4\n\t"
++ /* decrement loop counter and branch back to the top */
++ "subs %[w], %[w], #1\n\t"
++ "bne 1b\n\t"
++ "2:\n\t"
++ : [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src)
++ : [component_half] "r" (component_half), [mask_alpha] "r" (mask),
++ [alpha_mask] "r" (alpha_mask)
++ : "r4", "r5", "r6", "r7", "r8", "r9", "cc", "memory"
++ );
++ }
++}
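Putting the pieces together, each iteration above computes OVER with a constant mask: the source pixel is scaled channel-by-channel by the mask alpha, the destination is scaled by 255 minus the scaled source alpha, and uqadd8 adds the two with per-byte saturation. A plain-C reference for one pixel (a sketch assuming premultiplied ARGB, reusing the hypothetical mul_div_255 helper):

#include <stdint.h>

static uint8_t
mul_div_255 (uint8_t x, uint8_t a)    /* as sketched above */
{
    uint32_t t = (uint32_t) x * a + 0x80;
    return (uint8_t) ((t + (t >> 8)) >> 8);
}

/* One pixel of OVER with a constant mask alpha. */
static uint32_t
over_mask_model (uint32_t src, uint8_t mask_alpha, uint32_t dst)
{
    uint32_t s = 0, out = 0;
    uint32_t c, ialpha;
    int sh;

    /* scale every source channel by the mask alpha */
    for (sh = 0; sh < 32; sh += 8)
        s |= (uint32_t) mul_div_255 ((src >> sh) & 0xff, mask_alpha) << sh;

    ialpha = 255 - (s >> 24);        /* 255 - scaled source alpha */

    /* out = s + dst * ialpha / 255, saturating per channel */
    for (sh = 0; sh < 32; sh += 8)
    {
        c = ((s >> sh) & 0xff)
          + mul_div_255 ((dst >> sh) & 0xff, (uint8_t) ialpha);
        out |= (c > 0xff ? 0xff : c) << sh;
    }
    return out;
}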
++
++void
++fbCompositeSolidMask_nx8x8888arm (pixman_op_t op,
++ pixman_image_t * pSrc,
++ pixman_image_t * pMask,
++ pixman_image_t * pDst,
++ int16_t xSrc,
++ int16_t ySrc,
++ int16_t xMask,
++ int16_t yMask,
++ int16_t xDst,
++ int16_t yDst,
++ uint16_t width,
++ uint16_t height)
++{
++ uint32_t src, srca;
++ uint32_t *dstLine, *dst;
++ uint8_t *maskLine, *mask;
++ int dstStride, maskStride;
++ uint16_t w;
++
++ fbComposeGetSolid(pSrc, src, pDst->bits.format);
++
++ srca = src >> 24;
++ if (src == 0)
++ return;
++
++ uint32_t component_mask = 0xff00ff;
++ uint32_t component_half = 0x800080;
++
++ uint32_t src_hi = (src >> 8) & component_mask;
++ uint32_t src_lo = src & component_mask;
++
++ fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
++ fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
++
++ while (height--)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++ mask = maskLine;
++ maskLine += maskStride;
++ w = width;
++
++//#define inner_branch
++ asm volatile (
++ "cmp %[w], #0\n\t"
++ "beq 2f\n\t"
++ "1:\n\t"
++ /* load mask */
++ "ldrb r5, [%[mask]], #1\n\t"
++#ifdef inner_branch
++ /* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
++ * The 0x0 case also allows us to avoid doing an unnecessary data
++ * write, which is more valuable, so we only check for that. */
++ /* 0x1000000 is the least value that contains alpha; all values
++ * less than it have a 0 alpha value. */
++ "cmp r5, #0x0\n\t"
++ "beq 3f\n\t"
++
++#endif
++ "ldr r4, [%[dest]] \n\t"
++
++ /* multiply by the mask byte (r5) then by 257 and divide by 65536 */
++ "mla r6, %[src_lo], r5, %[component_half]\n\t"
++ "mla r7, %[src_hi], r5, %[component_half]\n\t"
++
++ "uxtab16 r6, r6, r6, ror #8\n\t"
++ "uxtab16 r7, r7, r7, ror #8\n\t"
++
++ "uxtb16 r6, r6, ror #8\n\t"
++ "uxtb16 r7, r7, ror #8\n\t"
++
++ /* recombine */
++ "orr r5, r6, r7, lsl #8\n\t"
++
++ "uxtb16 r6, r4\n\t"
++ "uxtb16 r7, r4, ror #8\n\t"
++
++ /* we could simplify this to use 'sub' if we were
++ * willing to give up a register for alpha_mask */
++ "mvn r8, r5\n\t"
++ "mov r8, r8, lsr #24\n\t"
++
++ /* multiply by alpha (r8) then by 257 and divide by 65536 */
++ "mla r6, r6, r8, %[component_half]\n\t"
++ "mla r7, r7, r8, %[component_half]\n\t"
++
++ "uxtab16 r6, r6, r6, ror #8\n\t"
++ "uxtab16 r7, r7, r7, ror #8\n\t"
++
++ "uxtb16 r6, r6, ror #8\n\t"
++ "uxtb16 r7, r7, ror #8\n\t"
++
++ /* recombine */
++ "orr r6, r6, r7, lsl #8\n\t"
++
++ "uqadd8 r5, r6, r5\n\t"
++
++#ifdef inner_branch
++ "3:\n\t"
++
++#endif
++ "str r5, [%[dest]], #4\n\t"
++ /* decrement loop counter and branch back to the top */
++ "subs %[w], %[w], #1\n\t"
++ "bne 1b\n\t"
++ "2:\n\t"
++ : [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src), [mask] "+r" (mask)
++ : [component_half] "r" (component_half),
++ [src_hi] "r" (src_hi), [src_lo] "r" (src_lo)
++ : "r4", "r5", "r6", "r7", "r8", "cc", "memory"
++ );
++ }
++}
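Note how fbCompositeSolidMask_nx8x8888arm hoists work out of the loop: the solid source is split once into src_lo and src_hi, two words of alternating 8-bit components in 16-bit lanes, so each mla scales two channels with a single 32-bit multiply (uxtb16 performs the same split on pixels loaded inside the loop). The split in plain C (an illustrative sketch, not patch code):

#include <stdint.h>

/* Split 0xAARRGGBB into lo = 0x00RR00BB and hi = 0x00AA00GG, so one
 * 32-bit multiply by an 8-bit factor scales two channels at once. */
static void
split_8888 (uint32_t argb, uint32_t *lo, uint32_t *hi)
{
    *lo = argb & 0x00ff00ff;
    *hi = (argb >> 8) & 0x00ff00ff;
}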
+--- /dev/null
++++ b/pixman/pixman-arm.h
+@@ -0,0 +1,94 @@
++/*
++ * Copyright © 2008 Mozilla Corporation
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that
++ * copyright notice and this permission notice appear in supporting
++ * documentation, and that the name of Mozilla Corporation not be used in
++ * advertising or publicity pertaining to distribution of the software without
++ * specific, written prior permission. Mozilla Corporation makes no
++ * representations about the suitability of this software for any purpose. It
++ * is provided "as is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
++ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
++ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
++ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
++ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
++ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
++ * SOFTWARE.
++ *
++ * Author: Jeff Muizelaar (jeff@infidigm.net)
++ *
++ */
++
++#include "pixman-private.h"
++
++#ifdef USE_ARM
++
++static inline pixman_bool_t pixman_have_arm(void) { return TRUE; }
++
++#else
++#define pixman_have_arm() FALSE
++#endif
++
++#ifdef USE_ARM
++
++void
++fbCompositeSrcAdd_8000x8000arm (pixman_op_t op,
++ pixman_image_t * pSrc,
++ pixman_image_t * pMask,
++ pixman_image_t * pDst,
++ int16_t xSrc,
++ int16_t ySrc,
++ int16_t xMask,
++ int16_t yMask,
++ int16_t xDst,
++ int16_t yDst,
++ uint16_t width,
++ uint16_t height);
++void
++fbCompositeSrc_8888x8888arm (pixman_op_t op,
++ pixman_image_t * pSrc,
++ pixman_image_t * pMask,
++ pixman_image_t * pDst,
++ int16_t xSrc,
++ int16_t ySrc,
++ int16_t xMask,
++ int16_t yMask,
++ int16_t xDst,
++ int16_t yDst,
++ uint16_t width,
++ uint16_t height);
++
++void
++fbCompositeSrc_8888x8x8888arm (pixman_op_t op,
++ pixman_image_t * pSrc,
++ pixman_image_t * pMask,
++ pixman_image_t * pDst,
++ int16_t xSrc,
++ int16_t ySrc,
++ int16_t xMask,
++ int16_t yMask,
++ int16_t xDst,
++ int16_t yDst,
++ uint16_t width,
++ uint16_t height);
++void
++fbCompositeSolidMask_nx8x8888arm (pixman_op_t op,
++ pixman_image_t * pSrc,
++ pixman_image_t * pMask,
++ pixman_image_t * pDst,
++ int16_t xSrc,
++ int16_t ySrc,
++ int16_t xMask,
++ int16_t yMask,
++ int16_t xDst,
++ int16_t yDst,
++ uint16_t width,
++ uint16_t height);
++
++
++#endif /* USE_ARM */
+--- a/pixman/pixman-pict.c
++++ b/pixman/pixman-pict.c
+@@ -34,6 +34,7 @@
+ #include "pixman-mmx.h"
+ #include "pixman-vmx.h"
+ #include "pixman-sse2.h"
++#include "pixman-arm.h"
+ #include "pixman-combine32.h"
+
+ #ifdef __GNUC__
+@@ -1479,6 +1480,26 @@ static const FastPathInfo vmx_fast_paths
+ };
+ #endif
+
++#ifdef USE_ARM
++static const FastPathInfo arm_fast_paths[] =
++{
++ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8888arm, 0 },
++ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8888arm, 0 },
++ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fbCompositeSrc_8888x8888arm, 0 },
++ { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fbCompositeSrc_8888x8888arm, 0 },
++ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8x8888arm, NEED_SOLID_MASK },
++ { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8x8888arm, NEED_SOLID_MASK },
++
++ { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fbCompositeSrcAdd_8000x8000arm, 0 },
++
++ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, fbCompositeSolidMask_nx8x8888arm, 0 },
++ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, fbCompositeSolidMask_nx8x8888arm, 0 },
++ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, fbCompositeSolidMask_nx8x8888arm, 0 },
++ { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, fbCompositeSolidMask_nx8x8888arm, 0 },
++
++ { PIXMAN_OP_NONE },
++};
++#endif
+
+ static const FastPathInfo c_fast_paths[] =
+ {
+@@ -1829,6 +1850,12 @@ pixman_image_composite (pixman_op_t
+ if (!info && pixman_have_vmx())
+ info = get_fast_path (vmx_fast_paths, op, pSrc, pMask, pDst, pixbuf);
+ #endif
++
++#ifdef USE_ARM
++ if (!info && pixman_have_arm())
++ info = get_fast_path (arm_fast_paths, op, pSrc, pMask, pDst, pixbuf);
++#endif
++
+ if (!info)
+ info = get_fast_path (c_fast_paths, op, pSrc, pMask, pDst, pixbuf);
+
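The dispatch change above consults arm_fast_paths before falling back to the generic c_fast_paths table: get_fast_path scans a table in order until the operator and the source, mask and destination formats all match, stopping at the PIXMAN_OP_NONE sentinel. A simplified model of that lookup (all type and function names here are stand-ins, not pixman's internals):

#include <stddef.h>

typedef int op_t;               /* stand-in for pixman_op_t */
typedef unsigned int fmt_t;     /* stand-in for pixman_format_code_t */
typedef void (*blit_fn) (void); /* stand-in for the composite function */

#define OP_NONE (-1)            /* stand-in for PIXMAN_OP_NONE */

typedef struct {
    op_t op;
    fmt_t src, mask, dest;
    blit_fn func;
} path_entry;

static const path_entry *
lookup_fast_path (const path_entry *table, op_t op,
                  fmt_t src, fmt_t mask, fmt_t dest)
{
    const path_entry *p;

    for (p = table; p->op != OP_NONE; p++)
        if (p->op == op && p->src == src &&
            p->mask == mask && p->dest == dest)
            return p;

    return NULL;  /* caller falls through to the next table */
}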
diff --git a/recipes/xorg-lib/pixman-0.12.0/pixman-x888-565.patch b/recipes/xorg-lib/pixman-0.12.0/pixman-x888-565.patch
new file mode 100644
index 0000000000..a3fa331710
--- /dev/null
+++ b/recipes/xorg-lib/pixman-0.12.0/pixman-x888-565.patch
@@ -0,0 +1,68 @@
+From: Vladimir Vukicevic <vladimir@slide.(none)>
+Date: Wed, 17 Sep 2008 20:01:31 +0000 (-0400)
+Subject: Add SRC x888x0565 C fast path
+X-Git-Url: http://gitweb.freedesktop.org/?p=pixman.git;a=commitdiff;h=7180230d4d87c55dfef1e17a0cc3b125d45aa3a0
+
+Add SRC x888x0565 C fast path
+---
+
+--- a/pixman/pixman-pict.c
++++ b/pixman/pixman-pict.c
+@@ -759,6 +759,46 @@ fbCompositeSrc_8888x0565 (pixman_op_t op
+ }
+ }
+
++
++void
++fbCompositeSrc_x888x0565 (pixman_op_t op,
++ pixman_image_t * pSrc,
++ pixman_image_t * pMask,
++ pixman_image_t * pDst,
++ int16_t xSrc,
++ int16_t ySrc,
++ int16_t xMask,
++ int16_t yMask,
++ int16_t xDst,
++ int16_t yDst,
++ uint16_t width,
++ uint16_t height)
++{
++ uint16_t *dstLine, *dst;
++ uint32_t *srcLine, *src, s;
++ int dstStride, srcStride;
++ uint16_t w;
++
++ fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
++ fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
++
++ while (height--)
++ {
++ dst = dstLine;
++ dstLine += dstStride;
++ src = srcLine;
++ srcLine += srcStride;
++ w = width;
++
++ while (w--)
++ {
++ s = READ(pSrc, src++);
++ WRITE(pDst, dst, cvt8888to0565(s));
++ dst++;
++ }
++ }
++}
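The loop body is a straight per-pixel format conversion: cvt8888to0565 keeps the top 5, 6 and 5 bits of red, green and blue and packs them into 16 bits. Its effect, modelled in plain C (a sketch of what the macro computes, not its pixman definition):

#include <stdint.h>

/* 0x??RRGGBB -> rrrrrggggggbbbbb (r5g6b5) */
static uint16_t
cvt8888to0565_model (uint32_t s)
{
    return (uint16_t) (((s >> 8) & 0xf800) |   /* top 5 bits of R */
                       ((s >> 5) & 0x07e0) |   /* top 6 bits of G */
                       ((s >> 3) & 0x001f));   /* top 5 bits of B */
}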
++
+ void
+ fbCompositeSrcAdd_8000x8000 (pixman_op_t op,
+ pixman_image_t * pSrc,
+@@ -1568,6 +1608,10 @@ static const FastPathInfo c_fast_paths[]
+ { PIXMAN_OP_SRC, PIXMAN_r5g6b5, PIXMAN_null, PIXMAN_r5g6b5, fbCompositeSrcSrc_nxn, 0 },
+ { PIXMAN_OP_SRC, PIXMAN_b5g6r5, PIXMAN_null, PIXMAN_b5g6r5, fbCompositeSrcSrc_nxn, 0 },
+ #endif
++ { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fbCompositeSrc_x888x0565, 0 },
++ { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fbCompositeSrc_x888x0565, 0 },
++ { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fbCompositeSrc_x888x0565, 0 },
++ { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fbCompositeSrc_x888x0565, 0 },
+ { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fbCompositeSrcIn_8x8, 0 },
+ { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, fbCompositeSolidMaskIn_nx8x8, 0 },
+ { PIXMAN_OP_NONE },