path: root/recipes/xorg-lib/pixman-0.21.6/0030-ARM-tweaked-horizontal-weights-update-in-NEON-biline.patch
Diffstat (limited to 'recipes/xorg-lib/pixman-0.21.6/0030-ARM-tweaked-horizontal-weights-update-in-NEON-biline.patch')
-rw-r--r--  recipes/xorg-lib/pixman-0.21.6/0030-ARM-tweaked-horizontal-weights-update-in-NEON-biline.patch  82
1 file changed, 82 insertions(+), 0 deletions(-)
diff --git a/recipes/xorg-lib/pixman-0.21.6/0030-ARM-tweaked-horizontal-weights-update-in-NEON-biline.patch b/recipes/xorg-lib/pixman-0.21.6/0030-ARM-tweaked-horizontal-weights-update-in-NEON-biline.patch
new file mode 100644
index 0000000000..831065cb3e
--- /dev/null
+++ b/recipes/xorg-lib/pixman-0.21.6/0030-ARM-tweaked-horizontal-weights-update-in-NEON-biline.patch
@@ -0,0 +1,82 @@
+From 4a0ade2a1e96fe3f1bca8953be221af0b2908925 Mon Sep 17 00:00:00 2001
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Thu, 10 Mar 2011 15:34:10 +0200
+Subject: [PATCH 30/40] ARM: tweaked horizontal weights update in NEON bilinear scaling code
+
+Moving the horizontal interpolation weights update instructions from the
+beginning of the loop to its end hides some pipeline stalls and improves
+performance.
+---
+ pixman/pixman-arm-neon-asm.S | 20 +++++++++++---------
+ 1 files changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/pixman/pixman-arm-neon-asm.S b/pixman/pixman-arm-neon-asm.S
+index 71b30ac..8788e95 100644
+--- a/pixman/pixman-arm-neon-asm.S
++++ b/pixman/pixman-arm-neon-asm.S
+@@ -2558,8 +2558,7 @@ fname:
+ bilinear_load_&src_fmt d0, d1, d2
+ vmull.u8 q1, d0, d28
+ vmlal.u8 q1, d1, d29
+- vshr.u16 d30, d24, #8
+- /* 4 cycles bubble */
++ /* 5 cycles bubble */
+ vshll.u16 q0, d2, #8
+ vmlsl.u16 q0, d2, d30
+ vmlal.u16 q0, d3, d30
+@@ -2574,17 +2573,17 @@ fname:
+ .macro bilinear_interpolate_two_pixels src_fmt, dst_fmt
+ bilinear_load_and_vertical_interpolate_two_&src_fmt \
+ q1, q11, d0, d1, d20, d21, d22, d23
+- vshr.u16 q15, q12, #8
+- vadd.u16 q12, q12, q13
+ vshll.u16 q0, d2, #8
+ vmlsl.u16 q0, d2, d30
+ vmlal.u16 q0, d3, d30
+ vshll.u16 q10, d22, #8
+ vmlsl.u16 q10, d22, d31
+ vmlal.u16 q10, d23, d31
+- vshrn.u32 d30, q0, #16
+- vshrn.u32 d31, q10, #16
+- vmovn.u16 d0, q15
++ vshrn.u32 d0, q0, #16
++ vshrn.u32 d1, q10, #16
++ vshr.u16 q15, q12, #8
++ vadd.u16 q12, q12, q13
++ vmovn.u16 d0, q0
+ bilinear_store_&dst_fmt 2, q2, q3
+ .endm
+
+@@ -2593,8 +2592,6 @@ fname:
+ q1, q11, d0, d1, d20, d21, d22, d23 \
+ q3, q9, d4, d5, d16, d17, d18, d19
+ pld [TMP1, PF_OFFS]
+- vshr.u16 q15, q12, #8
+- vadd.u16 q12, q12, q13
+ vshll.u16 q0, d2, #8
+ vmlsl.u16 q0, d2, d30
+ vmlal.u16 q0, d3, d30
+@@ -2614,8 +2611,10 @@ fname:
+ vshrn.u32 d1, q10, #16
+ vshrn.u32 d4, q2, #16
+ vshrn.u32 d5, q8, #16
++ vshr.u16 q15, q12, #8
+ vmovn.u16 d0, q0
+ vmovn.u16 d1, q2
++ vadd.u16 q12, q12, q13
+ bilinear_store_&dst_fmt 4, q2, q3
+ .endm
+
+@@ -2669,6 +2668,9 @@ pixman_asm_function fname
+ vadd.u16 d25, d25, d26
+ vadd.u16 q13, q13, q13
+
++ vshr.u16 q15, q12, #8
++ vadd.u16 q12, q12, q13
++
+ subs WIDTH, WIDTH, #4
+ blt 1f
+ mov PF_OFFS, PF_OFFS, asr #(16 - bpp_shift)
+--
+1.6.6.1
+
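For readers skimming the patch, here is a minimal C sketch of the scheduling idea it applies. This is not the pixman NEON code, and the function name, parameters, and fixed-point layout are illustrative assumptions; it only shows why computing the next pixel's horizontal weight at the end of the loop body (instead of at the top) lets that update overlap with the long-latency interpolation work on an in-order pipeline, which is what moving "vshr.u16 q15, q12, #8" / "vadd.u16 q12, q12, q13" to the bottom of the macros achieves.

#include <stdint.h>

/* Hypothetical 1-D horizontal linear scaler in 16.16 fixed point.
 * Edge handling is omitted; the point is only where the weight update
 * sits relative to the interpolation work. */
static void scale_row_sketch(uint8_t *dst, const uint8_t *src,
                             int dst_width, uint32_t x, uint32_t ux)
{
    /* Prologue: prepare the weight and position for the first output pixel,
     * mirroring the vshr/vadd pair the patch hoists in front of the loop. */
    uint32_t w  = (x >> 8) & 0xff;   /* 8-bit horizontal weight */
    uint32_t i0 = x >> 16;           /* integer source position */
    x += ux;

    for (int i = 0; i < dst_width; i++) {
        /* The interpolation uses the weight/position prepared earlier;
         * on an in-order core this is the long-latency multiply part. */
        uint32_t l = src[i0], r = src[i0 + 1];
        dst[i] = (uint8_t)((l * (256u - w) + r * w) >> 8);

        /* The update for the *next* pixel sits at the end of the body, so
         * it can issue in the shadow of the work above instead of stalling
         * the start of the next iteration. */
        w  = (x >> 8) & 0xff;
        i0 = x >> 16;
        x += ux;
    }
}

In C the compiler is free to reschedule this anyway; the ordering only matters in hand-written assembly such as pixman-arm-neon-asm.S, where instruction placement directly determines the pipeline bubbles the commit message refers to.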