Diffstat (limited to 'meta/recipes-core/glibc/glibc/CVE-2020-6096-2.patch')
-rw-r--r--  meta/recipes-core/glibc/glibc/CVE-2020-6096-2.patch  111
1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/meta/recipes-core/glibc/glibc/CVE-2020-6096-2.patch b/meta/recipes-core/glibc/glibc/CVE-2020-6096-2.patch
new file mode 100644
index 0000000000..bfb2d7e7f5
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2020-6096-2.patch
@@ -0,0 +1,111 @@
+From beea361050728138b82c57dda0c4810402d342b9 Mon Sep 17 00:00:00 2001
+From: Alexander Anisimov <a.anisimov@omprussia.ru>
+Date: Wed, 8 Jul 2020 14:18:31 +0200
+Subject: [PATCH 2/2] arm: CVE-2020-6096: Fix multiarch memcpy for negative
+ length [BZ #25620]
+
+Use unsigned branch instructions so that the length in r2 is treated
+as unsigned, fixing the wrong behavior when a negative length is
+passed to memcpy.  This commit fixes the armv7 version.
+
+CVE: CVE-2020-6096
+Upstream-Status: Backport [git://sourceware.org/git/glibc.git]
+Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
+---
+ sysdeps/arm/armv7/multiarch/memcpy_impl.S | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/sysdeps/arm/armv7/multiarch/memcpy_impl.S b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+index bf4ac7077f..379bb56fc9 100644
+--- a/sysdeps/arm/armv7/multiarch/memcpy_impl.S
++++ b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+@@ -268,7 +268,7 @@ ENTRY(memcpy)
+
+ mov dst, dstin /* Preserve dstin, we need to return it. */
+ cmp count, #64
+- bge .Lcpy_not_short
++ bhs .Lcpy_not_short
+ /* Deal with small copies quickly by dropping straight into the
+ exit block. */
+
+@@ -351,10 +351,10 @@ ENTRY(memcpy)
+
+ 1:
+ subs tmp2, count, #64 /* Use tmp2 for count. */
+- blt .Ltail63aligned
++ blo .Ltail63aligned
+
+ cmp tmp2, #512
+- bge .Lcpy_body_long
++ bhs .Lcpy_body_long
+
+ .Lcpy_body_medium: /* Count in tmp2. */
+ #ifdef USE_VFP
+@@ -378,7 +378,7 @@ ENTRY(memcpy)
+ add src, src, #64
+ vstr d1, [dst, #56]
+ add dst, dst, #64
+- bge 1b
++ bhs 1b
+ tst tmp2, #0x3f
+ beq .Ldone
+
+@@ -412,7 +412,7 @@ ENTRY(memcpy)
+ ldrd A_l, A_h, [src, #64]!
+ strd A_l, A_h, [dst, #64]!
+ subs tmp2, tmp2, #64
+- bge 1b
++ bhs 1b
+ tst tmp2, #0x3f
+ bne 1f
+ ldr tmp2,[sp], #FRAME_SIZE
+@@ -482,7 +482,7 @@ ENTRY(memcpy)
+ add src, src, #32
+
+ subs tmp2, tmp2, #prefetch_lines * 64 * 2
+- blt 2f
++ blo 2f
+ 1:
+ cpy_line_vfp d3, 0
+ cpy_line_vfp d4, 64
+@@ -494,7 +494,7 @@ ENTRY(memcpy)
+ add dst, dst, #2 * 64
+ add src, src, #2 * 64
+ subs tmp2, tmp2, #prefetch_lines * 64
+- bge 1b
++ bhs 1b
+
+ 2:
+ cpy_tail_vfp d3, 0
+@@ -615,8 +615,8 @@ ENTRY(memcpy)
+ 1:
+ pld [src, #(3 * 64)]
+ subs count, count, #64
+- ldrmi tmp2, [sp], #FRAME_SIZE
+- bmi .Ltail63unaligned
++ ldrlo tmp2, [sp], #FRAME_SIZE
++ blo .Ltail63unaligned
+ pld [src, #(4 * 64)]
+
+ #ifdef USE_NEON
+@@ -633,7 +633,7 @@ ENTRY(memcpy)
+ neon_load_multi d0-d3, src
+ neon_load_multi d4-d7, src
+ subs count, count, #64
+- bmi 2f
++ blo 2f
+ 1:
+ pld [src, #(4 * 64)]
+ neon_store_multi d0-d3, dst
+@@ -641,7 +641,7 @@ ENTRY(memcpy)
+ neon_store_multi d4-d7, dst
+ neon_load_multi d4-d7, src
+ subs count, count, #64
+- bpl 1b
++ bhs 1b
+ 2:
+ neon_store_multi d0-d3, dst
+ neon_store_multi d4-d7, dst
+--
+2.17.0
+
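Why the condition codes change: memcpy takes its length as a size_t, which is
unsigned, but the original armv7 assembly tested the count in r2 with signed
condition codes (bge, blt, bmi, bpl). A "negative" length such as -1 arrives
as 0xffffffff; a signed compare classifies it as less than 64, so the copy is
misrouted into the short-copy exit block and the code that follows misbehaves
(CVE-2020-6096). The unsigned condition codes (bhs, blo) match size_t
semantics. The standalone C sketch below is not part of the patch; it is
added here only to illustrate the misclassification and compiles with any C
compiler.

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  /* A caller erroneously passes a negative length; converted to
     size_t it becomes the largest representable value.  */
  size_t count = (size_t) -1;

  /* What the old signed branch (cmp count, #64; bge) effectively
     tested: viewed as a signed value the count is -1, which is
     < 64, so the huge copy was misrouted to the short-copy path.  */
  if ((int32_t) count >= 64)
    puts ("signed compare: large-copy path");
  else
    puts ("signed compare: wrongly takes the short-copy path");

  /* What the new unsigned branch (cmp count, #64; bhs) tests:
     0xffffffff >= 64 is true, so the count is classified
     correctly.  */
  if (count >= 64)
    puts ("unsigned compare: correctly takes the large-copy path");

  return 0;
}

On ARM, after a cmp or subs sets the flags, bge/blt/bmi/bpl test the signed
result (the N and V flags), while bhs/blo test the carry flag, i.e. perform
an unsigned comparison. The patch is therefore a mechanical substitution of
unsigned for signed branches at every point where the count in r2 is tested.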