author    Khem Raj <raj.khem@gmail.com>    2010-10-26 09:24:19 -0700
committer Khem Raj <raj.khem@gmail.com>    2010-10-26 09:27:16 -0700
commit    f0a0500df04218ca4c921edef8fc7a2eceff1bc7 (patch)
tree      768e48f43468c462dd7da369f799b5c5f50b8e73 /recipes/gcc
parent    27e60dd004b46f79979e4485a2b3eef836671e7c (diff)
download  openembedded-f0a0500df04218ca4c921edef8fc7a2eceff1bc7.tar.gz
gcc-4.5: Import recent linaro patches
* These are selected patches that are applied to the latest Linaro 4.5 gcc.

Signed-off-by: Khem Raj <raj.khem@gmail.com>
Diffstat (limited to 'recipes/gcc')
-rw-r--r--  recipes/gcc/gcc-4.5.inc                                 |   16
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99402.patch  | 1268
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99403.patch  |  176
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99404.patch  |  386
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99405.patch  |   36
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99406.patch  |   20
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99407.patch  |   33
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99408.patch  |  603
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99409.patch  |   18
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99410.patch  |   32
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99411.patch  |   21
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99412.patch  |  316
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99413.patch  |   26
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99414.patch  |   36
-rw-r--r--  recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99415.patch  |   46
15 files changed, 3032 insertions(+), 1 deletion(-)
diff --git a/recipes/gcc/gcc-4.5.inc b/recipes/gcc/gcc-4.5.inc
index b86b9d8482..900804cfd0 100644
--- a/recipes/gcc/gcc-4.5.inc
+++ b/recipes/gcc/gcc-4.5.inc
@@ -8,7 +8,7 @@ DEPENDS = "mpfr gmp libmpc libelf"
NATIVEDEPS = "mpfr-native gmp-native libmpc-native"
-INC_PR = "r14"
+INC_PR = "r15"
SRCREV = "164562"
PV = "4.5"
@@ -117,6 +117,20 @@ SRC_URI = "svn://gcc.gnu.org/svn/gcc/branches;module=${BRANCH} \
file://linaro/gcc-4.5-linaro-r99396.patch \
file://linaro/gcc-4.5-linaro-r99397.patch \
file://linaro/gcc-4.5-linaro-r99398.patch \
+ file://linaro/gcc-4.5-linaro-r99402.patch \
+ file://linaro/gcc-4.5-linaro-r99403.patch \
+ file://linaro/gcc-4.5-linaro-r99404.patch \
+ file://linaro/gcc-4.5-linaro-r99405.patch \
+ file://linaro/gcc-4.5-linaro-r99406.patch \
+ file://linaro/gcc-4.5-linaro-r99407.patch \
+ file://linaro/gcc-4.5-linaro-r99408.patch \
+ file://linaro/gcc-4.5-linaro-r99409.patch \
+ file://linaro/gcc-4.5-linaro-r99410.patch \
+ file://linaro/gcc-4.5-linaro-r99411.patch \
+ file://linaro/gcc-4.5-linaro-r99412.patch \
+ file://linaro/gcc-4.5-linaro-r99413.patch \
+ file://linaro/gcc-4.5-linaro-r99414.patch \
+ file://linaro/gcc-4.5-linaro-r99415.patch \
file://gcc-vmovl-PR45805.patch \
file://gcc-scalar-widening-pr45847.patch \
file://gcc-linaro-fix-lp-653316.patch \
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99402.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99402.patch
new file mode 100644
index 0000000000..6627a11d4a
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99402.patch
@@ -0,0 +1,1268 @@
+2010-09-17 Chung-Lin Tang <cltang@codesourcery.com>
+
+ Backport from mainline:
+
+ 2010-07-15 Bernd Schmidt <bernds@codesourcery.com>
+
+ gcc/
+ * postreload.c (last_label_ruid, first_index_reg, last_index_reg):
+ New static variables.
+ (reload_combine_recognize_pattern): New static function, broken out
+ of reload_combine.
+ (reload_combine): Use it. Only initialize first_index_reg and
+ last_index_reg once.
+
+ 2010-07-17 Bernd Schmidt <bernds@codesourcery.com>
+
+ PR target/42235
+ gcc/
+ * postreload.c (reload_cse_move2add): Return bool, true if anything
+ changed. All callers changed.
+ (move2add_use_add2_insn): Likewise.
+ (move2add_use_add3_insn): Likewise.
+ (reload_cse_regs): If reload_cse_move2add changed anything, rerun
+ reload_combine.
+ (RELOAD_COMBINE_MAX_USES): Bump to 16.
+ (last_jump_ruid): New static variable.
+ (struct reg_use): New members CONTAINING_MEM and RUID.
+ (reg_state): New members ALL_OFFSETS_MATCH and REAL_STORE_RUID.
+ (reload_combine_split_one_ruid, reload_combine_split_ruids,
+ reload_combine_purge_insn_uses, reload_combine_closest_single_use,
+ reload_combine_purge_reg_uses_after_ruid,
+ reload_combine_recognize_const_pattern): New static functions.
+ (reload_combine_recognize_pattern): Verify that ALL_OFFSETS_MATCH
+ is true for our reg and that we have available index regs.
+ (reload_combine_note_use): New args RUID and CONTAINING_MEM. All
+ callers changed. Use them to initialize fields in struct reg_use.
+ (reload_combine): Initialize last_jump_ruid. Be careful when to
+ take PREV_INSN of the scanned insn. Update REAL_STORE_RUID fields.
+ Call reload_combine_recognize_const_pattern.
+ (reload_combine_note_store): Update REAL_STORE_RUID field.
+
+ gcc/testsuite/
+ * gcc.target/arm/pr42235.c: New test.
+
+ 2010-07-19 Bernd Schmidt <bernds@codesourcery.com>
+
+ gcc/
+ * postreload.c (reload_combine_closest_single_use): Ignore the
+ number of uses for DEBUG_INSNs.
+ (fixup_debug_insns): New static function.
+ (reload_combine_recognize_const_pattern): Use it. Don't let the
+ main loop be affected by DEBUG_INSNs.
+ Really disallow moving adds past a jump insn.
+ (reload_combine_recognize_pattern): Don't update use_ruid here.
+ (reload_combine_note_use): Do it here.
+ (reload_combine): Use control_flow_insn_p rather than JUMP_P.
+
+ 2010-07-20 Bernd Schmidt <bernds@codesourcery.com>
+
+ gcc/
+ * postreload.c (fixup_debug_insns): Remove arg REGNO. New args
+ FROM and TO. All callers changed. Don't look for tracked uses,
+ just scan the RTL for DEBUG_INSNs and substitute.
+ (reload_combine_recognize_pattern): Call fixup_debug_insns.
+ (reload_combine): Ignore DEBUG_INSNs.
+
+ 2010-07-22 Bernd Schmidt <bernds@codesourcery.com>
+
+ PR bootstrap/44970
+ PR middle-end/45009
+ gcc/
+ * postreload.c: Include "target.h".
+ (reload_combine_closest_single_use): Don't take DEBUG_INSNs
+ into account.
+ (fixup_debug_insns): Don't copy the rtx.
+ (reload_combine_recognize_const_pattern): DEBUG_INSNs can't have uses.
+ Don't copy when replacing. Call fixup_debug_insns in the case where
+ we merged one add with another.
+ (reload_combine_recognize_pattern): Fail if there aren't any uses.
+ Try harder to determine whether we're picking a valid index register.
+ Don't set store_ruid for an insn we're going to scan in the
+ next iteration.
+ (reload_combine): Remove unused code.
+ (reload_combine_note_use): When updating use information for
+ an old insn, ignore a use that occurs after store_ruid.
+ * Makefile.in (postreload.o): Update dependencies.
+
+ 2010-07-27 Bernd Schmidt <bernds@codesourcery.com>
+
+ gcc/
+ * postreload.c (reload_combine_recognize_const_pattern): Move test
+ for limiting the insn movement to the right scope.
+
+ 2010-07-27 Bernd Schmidt <bernds@codesourcery.com>
+
+ gcc/
+ * postreload.c (try_replace_in_use): New static function.
+ (reload_combine_recognize_const_pattern): Use it here. Allow
+ substituting into a final add insn, and substituting into a memory
+ reference in an insn that sets the reg.
+
+=== modified file 'gcc/Makefile.in'
+Index: gcc-4.5/gcc/Makefile.in
+===================================================================
+--- gcc-4.5.orig/gcc/Makefile.in
++++ gcc-4.5/gcc/Makefile.in
+@@ -3159,7 +3159,7 @@ postreload.o : postreload.c $(CONFIG_H)
+ $(RTL_H) $(REAL_H) $(FLAGS_H) $(EXPR_H) $(OPTABS_H) reload.h $(REGS_H) \
+ hard-reg-set.h insn-config.h $(BASIC_BLOCK_H) $(RECOG_H) output.h \
+ $(FUNCTION_H) $(TOPLEV_H) cselib.h $(TM_P_H) $(EXCEPT_H) $(TREE_H) $(MACHMODE_H) \
+- $(OBSTACK_H) $(TIMEVAR_H) $(TREE_PASS_H) $(DF_H) $(DBGCNT_H)
++ $(OBSTACK_H) $(TARGET_H) $(TIMEVAR_H) $(TREE_PASS_H) $(DF_H) $(DBGCNT_H)
+ postreload-gcse.o : postreload-gcse.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
+ $(TM_H) $(RTL_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h \
+ $(RECOG_H) $(EXPR_H) $(BASIC_BLOCK_H) $(FUNCTION_H) output.h $(TOPLEV_H) \
+Index: gcc-4.5/gcc/postreload.c
+===================================================================
+--- gcc-4.5.orig/gcc/postreload.c
++++ gcc-4.5/gcc/postreload.c
+@@ -44,6 +44,7 @@ along with GCC; see the file COPYING3.
+ #include "toplev.h"
+ #include "except.h"
+ #include "tree.h"
++#include "target.h"
+ #include "timevar.h"
+ #include "tree-pass.h"
+ #include "df.h"
+@@ -56,10 +57,10 @@ static int reload_cse_simplify_set (rtx,
+ static int reload_cse_simplify_operands (rtx, rtx);
+
+ static void reload_combine (void);
+-static void reload_combine_note_use (rtx *, rtx);
++static void reload_combine_note_use (rtx *, rtx, int, rtx);
+ static void reload_combine_note_store (rtx, const_rtx, void *);
+
+-static void reload_cse_move2add (rtx);
++static bool reload_cse_move2add (rtx);
+ static void move2add_note_store (rtx, const_rtx, void *);
+
+ /* Call cse / combine like post-reload optimization phases.
+@@ -67,11 +68,16 @@ static void move2add_note_store (rtx, co
+ void
+ reload_cse_regs (rtx first ATTRIBUTE_UNUSED)
+ {
++ bool moves_converted;
+ reload_cse_regs_1 (first);
+ reload_combine ();
+- reload_cse_move2add (first);
++ moves_converted = reload_cse_move2add (first);
+ if (flag_expensive_optimizations)
+- reload_cse_regs_1 (first);
++ {
++ if (moves_converted)
++ reload_combine ();
++ reload_cse_regs_1 (first);
++ }
+ }
+
+ /* See whether a single set SET is a noop. */
+@@ -660,30 +666,43 @@ reload_cse_simplify_operands (rtx insn,
+
+ /* The maximum number of uses of a register we can keep track of to
+ replace them with reg+reg addressing. */
+-#define RELOAD_COMBINE_MAX_USES 6
++#define RELOAD_COMBINE_MAX_USES 16
+
+-/* INSN is the insn where a register has been used, and USEP points to the
+- location of the register within the rtl. */
+-struct reg_use { rtx insn, *usep; };
++/* Describes a recorded use of a register. */
++struct reg_use
++{
++ /* The insn where a register has been used. */
++ rtx insn;
++ /* Points to the memory reference enclosing the use, if any, NULL_RTX
++ otherwise. */
++ rtx containing_mem;
++ /* Location of the register within INSN. */
++ rtx *usep;
++ /* The reverse uid of the insn. */
++ int ruid;
++};
+
+ /* If the register is used in some unknown fashion, USE_INDEX is negative.
+ If it is dead, USE_INDEX is RELOAD_COMBINE_MAX_USES, and STORE_RUID
+- indicates where it becomes live again.
++ indicates where it is first set or clobbered.
+ Otherwise, USE_INDEX is the index of the last encountered use of the
+- register (which is first among these we have seen since we scan backwards),
+- OFFSET contains the constant offset that is added to the register in
+- all encountered uses, and USE_RUID indicates the first encountered, i.e.
+- last, of these uses.
++ register (which is first among these we have seen since we scan backwards).
++ USE_RUID indicates the first encountered, i.e. last, of these uses.
++ If ALL_OFFSETS_MATCH is true, all encountered uses were inside a PLUS
++ with a constant offset; OFFSET contains this constant in that case.
+ STORE_RUID is always meaningful if we only want to use a value in a
+ register in a different place: it denotes the next insn in the insn
+- stream (i.e. the last encountered) that sets or clobbers the register. */
++ stream (i.e. the last encountered) that sets or clobbers the register.
++ REAL_STORE_RUID is similar, but clobbers are ignored when updating it. */
+ static struct
+ {
+ struct reg_use reg_use[RELOAD_COMBINE_MAX_USES];
+- int use_index;
+ rtx offset;
++ int use_index;
+ int store_ruid;
++ int real_store_ruid;
+ int use_ruid;
++ bool all_offsets_match;
+ } reg_state[FIRST_PSEUDO_REGISTER];
+
+ /* Reverse linear uid. This is increased in reload_combine while scanning
+@@ -691,42 +710,548 @@ static struct
+ and the store_ruid / use_ruid fields in reg_state. */
+ static int reload_combine_ruid;
+
++/* The RUID of the last label we encountered in reload_combine. */
++static int last_label_ruid;
++
++/* The RUID of the last jump we encountered in reload_combine. */
++static int last_jump_ruid;
++
++/* The register numbers of the first and last index register. A value of
++ -1 in LAST_INDEX_REG indicates that we've previously computed these
++ values and found no suitable index registers. */
++static int first_index_reg = -1;
++static int last_index_reg;
++
+ #define LABEL_LIVE(LABEL) \
+ (label_live[CODE_LABEL_NUMBER (LABEL) - min_labelno])
+
++/* Subroutine of reload_combine_split_ruids, called to fix up a single
++ ruid pointed to by *PRUID if it is higher than SPLIT_RUID. */
++
++static inline void
++reload_combine_split_one_ruid (int *pruid, int split_ruid)
++{
++ if (*pruid > split_ruid)
++ (*pruid)++;
++}
++
++/* Called when we insert a new insn in a position we've already passed in
++ the scan. Examine all our state, increasing all ruids that are higher
++ than SPLIT_RUID by one in order to make room for a new insn. */
++
++static void
++reload_combine_split_ruids (int split_ruid)
++{
++ unsigned i;
++
++ reload_combine_split_one_ruid (&reload_combine_ruid, split_ruid);
++ reload_combine_split_one_ruid (&last_label_ruid, split_ruid);
++ reload_combine_split_one_ruid (&last_jump_ruid, split_ruid);
++
++ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
++ {
++ int j, idx = reg_state[i].use_index;
++ reload_combine_split_one_ruid (&reg_state[i].use_ruid, split_ruid);
++ reload_combine_split_one_ruid (&reg_state[i].store_ruid, split_ruid);
++ reload_combine_split_one_ruid (&reg_state[i].real_store_ruid,
++ split_ruid);
++ if (idx < 0)
++ continue;
++ for (j = idx; j < RELOAD_COMBINE_MAX_USES; j++)
++ {
++ reload_combine_split_one_ruid (&reg_state[i].reg_use[j].ruid,
++ split_ruid);
++ }
++ }
++}
++
++/* Called when we are about to rescan a previously encountered insn with
++ reload_combine_note_use after modifying some part of it. This clears all
++ information about uses in that particular insn. */
++
++static void
++reload_combine_purge_insn_uses (rtx insn)
++{
++ unsigned i;
++
++ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
++ {
++ int j, k, idx = reg_state[i].use_index;
++ if (idx < 0)
++ continue;
++ j = k = RELOAD_COMBINE_MAX_USES;
++ while (j-- > idx)
++ {
++ if (reg_state[i].reg_use[j].insn != insn)
++ {
++ k--;
++ if (k != j)
++ reg_state[i].reg_use[k] = reg_state[i].reg_use[j];
++ }
++ }
++ reg_state[i].use_index = k;
++ }
++}
++
++/* Called when we need to forget about all uses of REGNO after an insn
++ which is identified by RUID. */
++
++static void
++reload_combine_purge_reg_uses_after_ruid (unsigned regno, int ruid)
++{
++ int j, k, idx = reg_state[regno].use_index;
++ if (idx < 0)
++ return;
++ j = k = RELOAD_COMBINE_MAX_USES;
++ while (j-- > idx)
++ {
++ if (reg_state[regno].reg_use[j].ruid >= ruid)
++ {
++ k--;
++ if (k != j)
++ reg_state[regno].reg_use[k] = reg_state[regno].reg_use[j];
++ }
++ }
++ reg_state[regno].use_index = k;
++}
++
++/* Find the use of REGNO with the ruid that is highest among those
++ lower than RUID_LIMIT, and return it if it is the only use of this
++ reg in the insn. Return NULL otherwise. */
++
++static struct reg_use *
++reload_combine_closest_single_use (unsigned regno, int ruid_limit)
++{
++ int i, best_ruid = 0;
++ int use_idx = reg_state[regno].use_index;
++ struct reg_use *retval;
++
++ if (use_idx < 0)
++ return NULL;
++ retval = NULL;
++ for (i = use_idx; i < RELOAD_COMBINE_MAX_USES; i++)
++ {
++ struct reg_use *use = reg_state[regno].reg_use + i;
++ int this_ruid = use->ruid;
++ if (this_ruid >= ruid_limit)
++ continue;
++ if (this_ruid > best_ruid)
++ {
++ best_ruid = this_ruid;
++ retval = use;
++ }
++ else if (this_ruid == best_ruid)
++ retval = NULL;
++ }
++ if (last_label_ruid >= best_ruid)
++ return NULL;
++ return retval;
++}
++
++/* After we've moved an add insn, fix up any debug insns that occur
++ between the old location of the add and the new location. REG is
++ the destination register of the add insn; REPLACEMENT is the
++ SET_SRC of the add. FROM and TO specify the range in which we
++ should make this change on debug insns. */
++
++static void
++fixup_debug_insns (rtx reg, rtx replacement, rtx from, rtx to)
++{
++ rtx insn;
++ for (insn = from; insn != to; insn = NEXT_INSN (insn))
++ {
++ rtx t;
++
++ if (!DEBUG_INSN_P (insn))
++ continue;
++
++ t = INSN_VAR_LOCATION_LOC (insn);
++ t = simplify_replace_rtx (t, reg, replacement);
++ validate_change (insn, &INSN_VAR_LOCATION_LOC (insn), t, 0);
++ }
++}
++
++/* Subroutine of reload_combine_recognize_const_pattern. Try to replace REG
++ with SRC in the insn described by USE, taking costs into account. Return
++ true if we made the replacement. */
++
++static bool
++try_replace_in_use (struct reg_use *use, rtx reg, rtx src)
++{
++ rtx use_insn = use->insn;
++ rtx mem = use->containing_mem;
++ bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (use_insn));
++
++ if (mem != NULL_RTX)
++ {
++ addr_space_t as = MEM_ADDR_SPACE (mem);
++ rtx oldaddr = XEXP (mem, 0);
++ rtx newaddr = NULL_RTX;
++ int old_cost = address_cost (oldaddr, GET_MODE (mem), as, speed);
++ int new_cost;
++
++ newaddr = simplify_replace_rtx (oldaddr, reg, src);
++ if (memory_address_addr_space_p (GET_MODE (mem), newaddr, as))
++ {
++ XEXP (mem, 0) = newaddr;
++ new_cost = address_cost (newaddr, GET_MODE (mem), as, speed);
++ XEXP (mem, 0) = oldaddr;
++ if (new_cost <= old_cost
++ && validate_change (use_insn,
++ &XEXP (mem, 0), newaddr, 0))
++ return true;
++ }
++ }
++ else
++ {
++ rtx new_set = single_set (use_insn);
++ if (new_set
++ && REG_P (SET_DEST (new_set))
++ && GET_CODE (SET_SRC (new_set)) == PLUS
++ && REG_P (XEXP (SET_SRC (new_set), 0))
++ && CONSTANT_P (XEXP (SET_SRC (new_set), 1)))
++ {
++ rtx new_src;
++ int old_cost = rtx_cost (SET_SRC (new_set), SET, speed);
++
++ gcc_assert (rtx_equal_p (XEXP (SET_SRC (new_set), 0), reg));
++ new_src = simplify_replace_rtx (SET_SRC (new_set), reg, src);
++
++ if (rtx_cost (new_src, SET, speed) <= old_cost
++ && validate_change (use_insn, &SET_SRC (new_set),
++ new_src, 0))
++ return true;
++ }
++ }
++ return false;
++}
++
++/* Called by reload_combine when scanning INSN. This function tries to detect
++ patterns where a constant is added to a register, and the result is used
++ in an address.
++ Return true if no further processing is needed on INSN; false if it wasn't
++ recognized and should be handled normally. */
++
++static bool
++reload_combine_recognize_const_pattern (rtx insn)
++{
++ int from_ruid = reload_combine_ruid;
++ rtx set, pat, reg, src, addreg;
++ unsigned int regno;
++ struct reg_use *use;
++ bool must_move_add;
++ rtx add_moved_after_insn = NULL_RTX;
++ int add_moved_after_ruid = 0;
++ int clobbered_regno = -1;
++
++ set = single_set (insn);
++ if (set == NULL_RTX)
++ return false;
++
++ reg = SET_DEST (set);
++ src = SET_SRC (set);
++ if (!REG_P (reg)
++ || hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] != 1
++ || GET_MODE (reg) != Pmode
++ || reg == stack_pointer_rtx)
++ return false;
++
++ regno = REGNO (reg);
++
++ /* We look for a REG1 = REG2 + CONSTANT insn, followed by either
++ uses of REG1 inside an address, or inside another add insn. If
++ possible and profitable, merge the addition into subsequent
++ uses. */
++ if (GET_CODE (src) != PLUS
++ || !REG_P (XEXP (src, 0))
++ || !CONSTANT_P (XEXP (src, 1)))
++ return false;
++
++ addreg = XEXP (src, 0);
++ must_move_add = rtx_equal_p (reg, addreg);
++
++ pat = PATTERN (insn);
++ if (must_move_add && set != pat)
++ {
++ /* We have to be careful when moving the add; apart from the
++ single_set there may also be clobbers. Recognize one special
++ case, that of one clobber alongside the set (likely a clobber
++ of the CC register). */
++ gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
++ if (XVECLEN (pat, 0) != 2 || XVECEXP (pat, 0, 0) != set
++ || GET_CODE (XVECEXP (pat, 0, 1)) != CLOBBER
++ || !REG_P (XEXP (XVECEXP (pat, 0, 1), 0)))
++ return false;
++ clobbered_regno = REGNO (XEXP (XVECEXP (pat, 0, 1), 0));
++ }
++
++ do
++ {
++ use = reload_combine_closest_single_use (regno, from_ruid);
++
++ if (use)
++ /* Start the search for the next use from here. */
++ from_ruid = use->ruid;
++
++ if (use && GET_MODE (*use->usep) == Pmode)
++ {
++ bool delete_add = false;
++ rtx use_insn = use->insn;
++ int use_ruid = use->ruid;
++
++ /* Avoid moving the add insn past a jump. */
++ if (must_move_add && use_ruid <= last_jump_ruid)
++ break;
++
++ /* If the add clobbers another hard reg in parallel, don't move
++ it past a real set of this hard reg. */
++ if (must_move_add && clobbered_regno >= 0
++ && reg_state[clobbered_regno].real_store_ruid >= use_ruid)
++ break;
++
++ gcc_assert (reg_state[regno].store_ruid <= use_ruid);
++ /* Avoid moving a use of ADDREG past a point where it is stored. */
++ if (reg_state[REGNO (addreg)].store_ruid > use_ruid)
++ break;
++
++ /* We also must not move the addition past an insn that sets
++ the same register, unless we can combine two add insns. */
++ if (must_move_add && reg_state[regno].store_ruid == use_ruid)
++ {
++ if (use->containing_mem == NULL_RTX)
++ delete_add = true;
++ else
++ break;
++ }
++
++ if (try_replace_in_use (use, reg, src))
++ {
++ reload_combine_purge_insn_uses (use_insn);
++ reload_combine_note_use (&PATTERN (use_insn), use_insn,
++ use_ruid, NULL_RTX);
++
++ if (delete_add)
++ {
++ fixup_debug_insns (reg, src, insn, use_insn);
++ delete_insn (insn);
++ return true;
++ }
++ if (must_move_add)
++ {
++ add_moved_after_insn = use_insn;
++ add_moved_after_ruid = use_ruid;
++ }
++ continue;
++ }
++ }
++ /* If we get here, we couldn't handle this use. */
++ if (must_move_add)
++ break;
++ }
++ while (use);
++
++ if (!must_move_add || add_moved_after_insn == NULL_RTX)
++ /* Process the add normally. */
++ return false;
++
++ fixup_debug_insns (reg, src, insn, add_moved_after_insn);
++
++ reorder_insns (insn, insn, add_moved_after_insn);
++ reload_combine_purge_reg_uses_after_ruid (regno, add_moved_after_ruid);
++ reload_combine_split_ruids (add_moved_after_ruid - 1);
++ reload_combine_note_use (&PATTERN (insn), insn,
++ add_moved_after_ruid, NULL_RTX);
++ reg_state[regno].store_ruid = add_moved_after_ruid;
++
++ return true;
++}
++
++/* Called by reload_combine when scanning INSN. Try to detect a pattern we
++ can handle and improve. Return true if no further processing is needed on
++ INSN; false if it wasn't recognized and should be handled normally. */
++
++static bool
++reload_combine_recognize_pattern (rtx insn)
++{
++ rtx set, reg, src;
++ unsigned int regno;
++
++ set = single_set (insn);
++ if (set == NULL_RTX)
++ return false;
++
++ reg = SET_DEST (set);
++ src = SET_SRC (set);
++ if (!REG_P (reg)
++ || hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] != 1)
++ return false;
++
++ regno = REGNO (reg);
++
++ /* Look for (set (REGX) (CONST_INT))
++ (set (REGX) (PLUS (REGX) (REGY)))
++ ...
++ ... (MEM (REGX)) ...
++ and convert it to
++ (set (REGZ) (CONST_INT))
++ ...
++ ... (MEM (PLUS (REGZ) (REGY)))... .
++
++ First, check that we have (set (REGX) (PLUS (REGX) (REGY)))
++ and that we know all uses of REGX before it dies.
++ Also, explicitly check that REGX != REGY; our life information
++ does not yet show whether REGY changes in this insn. */
++
++ if (GET_CODE (src) == PLUS
++ && reg_state[regno].all_offsets_match
++ && last_index_reg != -1
++ && REG_P (XEXP (src, 1))
++ && rtx_equal_p (XEXP (src, 0), reg)
++ && !rtx_equal_p (XEXP (src, 1), reg)
++ && reg_state[regno].use_index >= 0
++ && reg_state[regno].use_index < RELOAD_COMBINE_MAX_USES
++ && last_label_ruid < reg_state[regno].use_ruid)
++ {
++ rtx base = XEXP (src, 1);
++ rtx prev = prev_nonnote_insn (insn);
++ rtx prev_set = prev ? single_set (prev) : NULL_RTX;
++ rtx index_reg = NULL_RTX;
++ rtx reg_sum = NULL_RTX;
++ int i;
++
++ /* Now we need to set INDEX_REG to an index register (denoted as
++ REGZ in the illustration above) and REG_SUM to the expression
++ register+register that we want to use to substitute uses of REG
++ (typically in MEMs) with. First check REG and BASE for being
++ index registers; we can use them even if they are not dead. */
++ if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], regno)
++ || TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS],
++ REGNO (base)))
++ {
++ index_reg = reg;
++ reg_sum = src;
++ }
++ else
++ {
++ /* Otherwise, look for a free index register. Since we have
++ checked above that neither REG nor BASE are index registers,
++ if we find anything at all, it will be different from these
++ two registers. */
++ for (i = first_index_reg; i <= last_index_reg; i++)
++ {
++ if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], i)
++ && reg_state[i].use_index == RELOAD_COMBINE_MAX_USES
++ && reg_state[i].store_ruid <= reg_state[regno].use_ruid
++ && (call_used_regs[i] || df_regs_ever_live_p (i))
++ && (!frame_pointer_needed || i != HARD_FRAME_POINTER_REGNUM)
++ && !fixed_regs[i] && !global_regs[i]
++ && hard_regno_nregs[i][GET_MODE (reg)] == 1
++ && targetm.hard_regno_scratch_ok (i))
++ {
++ index_reg = gen_rtx_REG (GET_MODE (reg), i);
++ reg_sum = gen_rtx_PLUS (GET_MODE (reg), index_reg, base);
++ break;
++ }
++ }
++ }
++
++ /* Check that PREV_SET is indeed (set (REGX) (CONST_INT)) and that
++ (REGY), i.e. BASE, is not clobbered before the last use we'll
++ create. */
++ if (reg_sum
++ && prev_set
++ && CONST_INT_P (SET_SRC (prev_set))
++ && rtx_equal_p (SET_DEST (prev_set), reg)
++ && (reg_state[REGNO (base)].store_ruid
++ <= reg_state[regno].use_ruid))
++ {
++ /* Change destination register and, if necessary, the constant
++ value in PREV, the constant loading instruction. */
++ validate_change (prev, &SET_DEST (prev_set), index_reg, 1);
++ if (reg_state[regno].offset != const0_rtx)
++ validate_change (prev,
++ &SET_SRC (prev_set),
++ GEN_INT (INTVAL (SET_SRC (prev_set))
++ + INTVAL (reg_state[regno].offset)),
++ 1);
++
++ /* Now for every use of REG that we have recorded, replace REG
++ with REG_SUM. */
++ for (i = reg_state[regno].use_index;
++ i < RELOAD_COMBINE_MAX_USES; i++)
++ validate_unshare_change (reg_state[regno].reg_use[i].insn,
++ reg_state[regno].reg_use[i].usep,
++ /* Each change must have its own
++ replacement. */
++ reg_sum, 1);
++
++ if (apply_change_group ())
++ {
++ struct reg_use *lowest_ruid = NULL;
++
++ /* For every new use of REG_SUM, we have to record the use
++ of BASE therein, i.e. operand 1. */
++ for (i = reg_state[regno].use_index;
++ i < RELOAD_COMBINE_MAX_USES; i++)
++ {
++ struct reg_use *use = reg_state[regno].reg_use + i;
++ reload_combine_note_use (&XEXP (*use->usep, 1), use->insn,
++ use->ruid, use->containing_mem);
++ if (lowest_ruid == NULL || use->ruid < lowest_ruid->ruid)
++ lowest_ruid = use;
++ }
++
++ fixup_debug_insns (reg, reg_sum, insn, lowest_ruid->insn);
++
++ /* Delete the reg-reg addition. */
++ delete_insn (insn);
++
++ if (reg_state[regno].offset != const0_rtx)
++ /* Previous REG_EQUIV / REG_EQUAL notes for PREV
++ are now invalid. */
++ remove_reg_equal_equiv_notes (prev);
++
++ reg_state[regno].use_index = RELOAD_COMBINE_MAX_USES;
++ return true;
++ }
++ }
++ }
++ return false;
++}
++
+ static void
+ reload_combine (void)
+ {
+- rtx insn, set;
+- int first_index_reg = -1;
+- int last_index_reg = 0;
++ rtx insn, prev;
+ int i;
+ basic_block bb;
+ unsigned int r;
+- int last_label_ruid;
+ int min_labelno, n_labels;
+ HARD_REG_SET ever_live_at_start, *label_live;
+
+- /* If reg+reg can be used in offsetable memory addresses, the main chunk of
+- reload has already used it where appropriate, so there is no use in
+- trying to generate it now. */
+- if (double_reg_address_ok && INDEX_REG_CLASS != NO_REGS)
+- return;
+-
+ /* To avoid wasting too much time later searching for an index register,
+ determine the minimum and maximum index register numbers. */
+- for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
+- if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], r))
+- {
+- if (first_index_reg == -1)
+- first_index_reg = r;
++ if (INDEX_REG_CLASS == NO_REGS)
++ last_index_reg = -1;
++ else if (first_index_reg == -1 && last_index_reg == 0)
++ {
++ for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
++ if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], r))
++ {
++ if (first_index_reg == -1)
++ first_index_reg = r;
+
+- last_index_reg = r;
+- }
++ last_index_reg = r;
++ }
+
+- /* If no index register is available, we can quit now. */
+- if (first_index_reg == -1)
+- return;
++ /* If no index register is available, we can quit now. Set LAST_INDEX_REG
++ to -1 so we'll know to quit early the next time we get here. */
++ if (first_index_reg == -1)
++ {
++ last_index_reg = -1;
++ return;
++ }
++ }
+
+ /* Set up LABEL_LIVE and EVER_LIVE_AT_START. The register lifetime
+ information is a bit fuzzy immediately after reload, but it's
+@@ -753,20 +1278,23 @@ reload_combine (void)
+ }
+
+ /* Initialize last_label_ruid, reload_combine_ruid and reg_state. */
+- last_label_ruid = reload_combine_ruid = 0;
++ last_label_ruid = last_jump_ruid = reload_combine_ruid = 0;
+ for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
+ {
+- reg_state[r].store_ruid = reload_combine_ruid;
++ reg_state[r].store_ruid = 0;
++ reg_state[r].real_store_ruid = 0;
+ if (fixed_regs[r])
+ reg_state[r].use_index = -1;
+ else
+ reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
+ }
+
+- for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
++ for (insn = get_last_insn (); insn; insn = prev)
+ {
+ rtx note;
+
++ prev = PREV_INSN (insn);
++
+ /* We cannot do our optimization across labels. Invalidating all the use
+ information we have would be costly, so we just note where the label
+ is and then later disable any optimization that would cross it. */
+@@ -777,141 +1305,17 @@ reload_combine (void)
+ if (! fixed_regs[r])
+ reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
+
+- if (! INSN_P (insn))
++ if (! NONDEBUG_INSN_P (insn))
+ continue;
+
+ reload_combine_ruid++;
+
+- /* Look for (set (REGX) (CONST_INT))
+- (set (REGX) (PLUS (REGX) (REGY)))
+- ...
+- ... (MEM (REGX)) ...
+- and convert it to
+- (set (REGZ) (CONST_INT))
+- ...
+- ... (MEM (PLUS (REGZ) (REGY)))... .
+-
+- First, check that we have (set (REGX) (PLUS (REGX) (REGY)))
+- and that we know all uses of REGX before it dies.
+- Also, explicitly check that REGX != REGY; our life information
+- does not yet show whether REGY changes in this insn. */
+- set = single_set (insn);
+- if (set != NULL_RTX
+- && REG_P (SET_DEST (set))
+- && (hard_regno_nregs[REGNO (SET_DEST (set))]
+- [GET_MODE (SET_DEST (set))]
+- == 1)
+- && GET_CODE (SET_SRC (set)) == PLUS
+- && REG_P (XEXP (SET_SRC (set), 1))
+- && rtx_equal_p (XEXP (SET_SRC (set), 0), SET_DEST (set))
+- && !rtx_equal_p (XEXP (SET_SRC (set), 1), SET_DEST (set))
+- && last_label_ruid < reg_state[REGNO (SET_DEST (set))].use_ruid)
+- {
+- rtx reg = SET_DEST (set);
+- rtx plus = SET_SRC (set);
+- rtx base = XEXP (plus, 1);
+- rtx prev = prev_nonnote_nondebug_insn (insn);
+- rtx prev_set = prev ? single_set (prev) : NULL_RTX;
+- unsigned int regno = REGNO (reg);
+- rtx index_reg = NULL_RTX;
+- rtx reg_sum = NULL_RTX;
+-
+- /* Now we need to set INDEX_REG to an index register (denoted as
+- REGZ in the illustration above) and REG_SUM to the expression
+- register+register that we want to use to substitute uses of REG
+- (typically in MEMs) with. First check REG and BASE for being
+- index registers; we can use them even if they are not dead. */
+- if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], regno)
+- || TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS],
+- REGNO (base)))
+- {
+- index_reg = reg;
+- reg_sum = plus;
+- }
+- else
+- {
+- /* Otherwise, look for a free index register. Since we have
+- checked above that neither REG nor BASE are index registers,
+- if we find anything at all, it will be different from these
+- two registers. */
+- for (i = first_index_reg; i <= last_index_reg; i++)
+- {
+- if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS],
+- i)
+- && reg_state[i].use_index == RELOAD_COMBINE_MAX_USES
+- && reg_state[i].store_ruid <= reg_state[regno].use_ruid
+- && hard_regno_nregs[i][GET_MODE (reg)] == 1)
+- {
+- index_reg = gen_rtx_REG (GET_MODE (reg), i);
+- reg_sum = gen_rtx_PLUS (GET_MODE (reg), index_reg, base);
+- break;
+- }
+- }
+- }
+-
+- /* Check that PREV_SET is indeed (set (REGX) (CONST_INT)) and that
+- (REGY), i.e. BASE, is not clobbered before the last use we'll
+- create. */
+- if (reg_sum
+- && prev_set
+- && CONST_INT_P (SET_SRC (prev_set))
+- && rtx_equal_p (SET_DEST (prev_set), reg)
+- && reg_state[regno].use_index >= 0
+- && (reg_state[REGNO (base)].store_ruid
+- <= reg_state[regno].use_ruid))
+- {
+- int i;
+-
+- /* Change destination register and, if necessary, the constant
+- value in PREV, the constant loading instruction. */
+- validate_change (prev, &SET_DEST (prev_set), index_reg, 1);
+- if (reg_state[regno].offset != const0_rtx)
+- validate_change (prev,
+- &SET_SRC (prev_set),
+- GEN_INT (INTVAL (SET_SRC (prev_set))
+- + INTVAL (reg_state[regno].offset)),
+- 1);
++ if (control_flow_insn_p (insn))
++ last_jump_ruid = reload_combine_ruid;
+
+- /* Now for every use of REG that we have recorded, replace REG
+- with REG_SUM. */
+- for (i = reg_state[regno].use_index;
+- i < RELOAD_COMBINE_MAX_USES; i++)
+- validate_unshare_change (reg_state[regno].reg_use[i].insn,
+- reg_state[regno].reg_use[i].usep,
+- /* Each change must have its own
+- replacement. */
+- reg_sum, 1);
+-
+- if (apply_change_group ())
+- {
+- /* For every new use of REG_SUM, we have to record the use
+- of BASE therein, i.e. operand 1. */
+- for (i = reg_state[regno].use_index;
+- i < RELOAD_COMBINE_MAX_USES; i++)
+- reload_combine_note_use
+- (&XEXP (*reg_state[regno].reg_use[i].usep, 1),
+- reg_state[regno].reg_use[i].insn);
+-
+- if (reg_state[REGNO (base)].use_ruid
+- > reg_state[regno].use_ruid)
+- reg_state[REGNO (base)].use_ruid
+- = reg_state[regno].use_ruid;
+-
+- /* Delete the reg-reg addition. */
+- delete_insn (insn);
+-
+- if (reg_state[regno].offset != const0_rtx)
+- /* Previous REG_EQUIV / REG_EQUAL notes for PREV
+- are now invalid. */
+- remove_reg_equal_equiv_notes (prev);
+-
+- reg_state[regno].use_index = RELOAD_COMBINE_MAX_USES;
+- reg_state[REGNO (index_reg)].store_ruid
+- = reload_combine_ruid;
+- continue;
+- }
+- }
+- }
++ if (reload_combine_recognize_const_pattern (insn)
++ || reload_combine_recognize_pattern (insn))
++ continue;
+
+ note_stores (PATTERN (insn), reload_combine_note_store, NULL);
+
+@@ -967,7 +1371,8 @@ reload_combine (void)
+ reg_state[i].use_index = -1;
+ }
+
+- reload_combine_note_use (&PATTERN (insn), insn);
++ reload_combine_note_use (&PATTERN (insn), insn,
++ reload_combine_ruid, NULL_RTX);
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ {
+ if (REG_NOTE_KIND (note) == REG_INC
+@@ -976,6 +1381,7 @@ reload_combine (void)
+ int regno = REGNO (XEXP (note, 0));
+
+ reg_state[regno].store_ruid = reload_combine_ruid;
++ reg_state[regno].real_store_ruid = reload_combine_ruid;
+ reg_state[regno].use_index = -1;
+ }
+ }
+@@ -985,8 +1391,8 @@ reload_combine (void)
+ }
+
+ /* Check if DST is a register or a subreg of a register; if it is,
+- update reg_state[regno].store_ruid and reg_state[regno].use_index
+- accordingly. Called via note_stores from reload_combine. */
++ update store_ruid, real_store_ruid and use_index in the reg_state
++ structure accordingly. Called via note_stores from reload_combine. */
+
+ static void
+ reload_combine_note_store (rtx dst, const_rtx set, void *data ATTRIBUTE_UNUSED)
+@@ -1010,14 +1416,14 @@ reload_combine_note_store (rtx dst, cons
+ /* note_stores might have stripped a STRICT_LOW_PART, so we have to be
+ careful with registers / register parts that are not full words.
+ Similarly for ZERO_EXTRACT. */
+- if (GET_CODE (set) != SET
+- || GET_CODE (SET_DEST (set)) == ZERO_EXTRACT
++ if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT
+ || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART)
+ {
+ for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
+ {
+ reg_state[i].use_index = -1;
+ reg_state[i].store_ruid = reload_combine_ruid;
++ reg_state[i].real_store_ruid = reload_combine_ruid;
+ }
+ }
+ else
+@@ -1025,6 +1431,8 @@ reload_combine_note_store (rtx dst, cons
+ for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
+ {
+ reg_state[i].store_ruid = reload_combine_ruid;
++ if (GET_CODE (set) == SET)
++ reg_state[i].real_store_ruid = reload_combine_ruid;
+ reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
+ }
+ }
+@@ -1035,7 +1443,7 @@ reload_combine_note_store (rtx dst, cons
+ *XP is the pattern of INSN, or a part of it.
+ Called from reload_combine, and recursively by itself. */
+ static void
+-reload_combine_note_use (rtx *xp, rtx insn)
++reload_combine_note_use (rtx *xp, rtx insn, int ruid, rtx containing_mem)
+ {
+ rtx x = *xp;
+ enum rtx_code code = x->code;
+@@ -1048,7 +1456,7 @@ reload_combine_note_use (rtx *xp, rtx in
+ case SET:
+ if (REG_P (SET_DEST (x)))
+ {
+- reload_combine_note_use (&SET_SRC (x), insn);
++ reload_combine_note_use (&SET_SRC (x), insn, ruid, NULL_RTX);
+ return;
+ }
+ break;
+@@ -1104,6 +1512,11 @@ reload_combine_note_use (rtx *xp, rtx in
+ return;
+ }
+
++ /* We may be called to update uses in previously seen insns.
++ Don't add uses beyond the last store we saw. */
++ if (ruid < reg_state[regno].store_ruid)
++ return;
++
+ /* If this register is already used in some unknown fashion, we
+ can't do anything.
+ If we decrement the index from zero to -1, we can't store more
+@@ -1112,29 +1525,34 @@ reload_combine_note_use (rtx *xp, rtx in
+ if (use_index < 0)
+ return;
+
+- if (use_index != RELOAD_COMBINE_MAX_USES - 1)
+- {
+- /* We have found another use for a register that is already
+- used later. Check if the offsets match; if not, mark the
+- register as used in an unknown fashion. */
+- if (! rtx_equal_p (offset, reg_state[regno].offset))
+- {
+- reg_state[regno].use_index = -1;
+- return;
+- }
+- }
+- else
++ if (use_index == RELOAD_COMBINE_MAX_USES - 1)
+ {
+ /* This is the first use of this register we have seen since we
+ marked it as dead. */
+ reg_state[regno].offset = offset;
+- reg_state[regno].use_ruid = reload_combine_ruid;
++ reg_state[regno].all_offsets_match = true;
++ reg_state[regno].use_ruid = ruid;
++ }
++ else
++ {
++ if (reg_state[regno].use_ruid > ruid)
++ reg_state[regno].use_ruid = ruid;
++
++ if (! rtx_equal_p (offset, reg_state[regno].offset))
++ reg_state[regno].all_offsets_match = false;
+ }
++
+ reg_state[regno].reg_use[use_index].insn = insn;
++ reg_state[regno].reg_use[use_index].ruid = ruid;
++ reg_state[regno].reg_use[use_index].containing_mem = containing_mem;
+ reg_state[regno].reg_use[use_index].usep = xp;
+ return;
+ }
+
++ case MEM:
++ containing_mem = x;
++ break;
++
+ default:
+ break;
+ }
+@@ -1144,11 +1562,12 @@ reload_combine_note_use (rtx *xp, rtx in
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+- reload_combine_note_use (&XEXP (x, i), insn);
++ reload_combine_note_use (&XEXP (x, i), insn, ruid, containing_mem);
+ else if (fmt[i] == 'E')
+ {
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+- reload_combine_note_use (&XVECEXP (x, i, j), insn);
++ reload_combine_note_use (&XVECEXP (x, i, j), insn, ruid,
++ containing_mem);
+ }
+ }
+ }
+@@ -1196,9 +1615,10 @@ static int move2add_last_label_luid;
+ while REG is known to already have value (SYM + offset).
+ This function tries to change INSN into an add instruction
+ (set (REG) (plus (REG) (OFF - offset))) using the known value.
+- It also updates the information about REG's known value. */
++ It also updates the information about REG's known value.
++ Return true if we made a change. */
+
+-static void
++static bool
+ move2add_use_add2_insn (rtx reg, rtx sym, rtx off, rtx insn)
+ {
+ rtx pat = PATTERN (insn);
+@@ -1207,6 +1627,7 @@ move2add_use_add2_insn (rtx reg, rtx sym
+ rtx new_src = gen_int_mode (INTVAL (off) - reg_offset[regno],
+ GET_MODE (reg));
+ bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
++ bool changed = false;
+
+ /* (set (reg) (plus (reg) (const_int 0))) is not canonical;
+ use (set (reg) (reg)) instead.
+@@ -1221,13 +1642,13 @@ move2add_use_add2_insn (rtx reg, rtx sym
+ (reg)), would be discarded. Maybe we should
+ try a truncMN pattern? */
+ if (INTVAL (off) == reg_offset [regno])
+- validate_change (insn, &SET_SRC (pat), reg, 0);
++ changed = validate_change (insn, &SET_SRC (pat), reg, 0);
+ }
+ else if (rtx_cost (new_src, PLUS, speed) < rtx_cost (src, SET, speed)
+ && have_add2_insn (reg, new_src))
+ {
+ rtx tem = gen_rtx_PLUS (GET_MODE (reg), reg, new_src);
+- validate_change (insn, &SET_SRC (pat), tem, 0);
++ changed = validate_change (insn, &SET_SRC (pat), tem, 0);
+ }
+ else if (sym == NULL_RTX && GET_MODE (reg) != BImode)
+ {
+@@ -1252,8 +1673,9 @@ move2add_use_add2_insn (rtx reg, rtx sym
+ gen_rtx_STRICT_LOW_PART (VOIDmode,
+ narrow_reg),
+ narrow_src);
+- if (validate_change (insn, &PATTERN (insn),
+- new_set, 0))
++ changed = validate_change (insn, &PATTERN (insn),
++ new_set, 0);
++ if (changed)
+ break;
+ }
+ }
+@@ -1263,6 +1685,7 @@ move2add_use_add2_insn (rtx reg, rtx sym
+ reg_mode[regno] = GET_MODE (reg);
+ reg_symbol_ref[regno] = sym;
+ reg_offset[regno] = INTVAL (off);
++ return changed;
+ }
+
+
+@@ -1272,9 +1695,10 @@ move2add_use_add2_insn (rtx reg, rtx sym
+ value (SYM + offset) and change INSN into an add instruction
+ (set (REG) (plus (the found register) (OFF - offset))) if such
+ a register is found. It also updates the information about
+- REG's known value. */
++ REG's known value.
++ Return true iff we made a change. */
+
+-static void
++static bool
+ move2add_use_add3_insn (rtx reg, rtx sym, rtx off, rtx insn)
+ {
+ rtx pat = PATTERN (insn);
+@@ -1284,6 +1708,7 @@ move2add_use_add3_insn (rtx reg, rtx sym
+ int min_regno;
+ bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
+ int i;
++ bool changed = false;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (reg_set_luid[i] > move2add_last_label_luid
+@@ -1328,20 +1753,25 @@ move2add_use_add3_insn (rtx reg, rtx sym
+ GET_MODE (reg));
+ tem = gen_rtx_PLUS (GET_MODE (reg), tem, new_src);
+ }
+- validate_change (insn, &SET_SRC (pat), tem, 0);
++ if (validate_change (insn, &SET_SRC (pat), tem, 0))
++ changed = true;
+ }
+ reg_set_luid[regno] = move2add_luid;
+ reg_base_reg[regno] = -1;
+ reg_mode[regno] = GET_MODE (reg);
+ reg_symbol_ref[regno] = sym;
+ reg_offset[regno] = INTVAL (off);
++ return changed;
+ }
+
+-static void
++/* Convert move insns with constant inputs to additions if they are cheaper.
++ Return true if any changes were made. */
++static bool
+ reload_cse_move2add (rtx first)
+ {
+ int i;
+ rtx insn;
++ bool changed = false;
+
+ for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
+ {
+@@ -1402,7 +1832,7 @@ reload_cse_move2add (rtx first)
+ && reg_base_reg[regno] < 0
+ && reg_symbol_ref[regno] == NULL_RTX)
+ {
+- move2add_use_add2_insn (reg, NULL_RTX, src, insn);
++ changed |= move2add_use_add2_insn (reg, NULL_RTX, src, insn);
+ continue;
+ }
+
+@@ -1463,6 +1893,7 @@ reload_cse_move2add (rtx first)
+ }
+ if (success)
+ delete_insn (insn);
++ changed |= success;
+ insn = next;
+ reg_mode[regno] = GET_MODE (reg);
+ reg_offset[regno] =
+@@ -1508,12 +1939,12 @@ reload_cse_move2add (rtx first)
+ && reg_base_reg[regno] < 0
+ && reg_symbol_ref[regno] != NULL_RTX
+ && rtx_equal_p (sym, reg_symbol_ref[regno]))
+- move2add_use_add2_insn (reg, sym, off, insn);
++ changed |= move2add_use_add2_insn (reg, sym, off, insn);
+
+ /* Otherwise, we have to find a register whose value is sum
+ of sym and some constant value. */
+ else
+- move2add_use_add3_insn (reg, sym, off, insn);
++ changed |= move2add_use_add3_insn (reg, sym, off, insn);
+
+ continue;
+ }
+@@ -1568,6 +1999,7 @@ reload_cse_move2add (rtx first)
+ }
+ }
+ }
++ return changed;
+ }
+
+ /* SET is a SET or CLOBBER that sets DST. DATA is the insn which
+Index: gcc-4.5/testsuite/gcc.target/arm/pr42235.c
+===================================================================
+--- /dev/null
++++ gcc-4.5/testsuite/gcc.target/arm/pr42235.c
+@@ -0,0 +1,11 @@
++/* { dg-options "-mthumb -O2 -march=armv5te" } */
++/* { dg-require-effective-target arm_thumb1_ok } */
++/* { dg-final { scan-assembler-not "add\[\\t \]*r.,\[\\t \]*r.,\[\\t \]*\#1" } } */
++/* { dg-final { scan-assembler-not "add\[\\t \]*r.,\[\\t \]*\#1" } } */
++
++#include <string.h>
++
++int foo (char *x)
++{
++ memset (x, 0, 6);
++}
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99403.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99403.patch
new file mode 100644
index 0000000000..093dd1c570
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99403.patch
@@ -0,0 +1,176 @@
+2010-09-20 Jie Zhang <jie@codesourcery.com>
+
+ Issue #5256
+
+ libstdc++-v3/
+
+ Backport from mainline:
+
+ 2010-05-21 Joseph Myers <joseph@codesourcery.com>
+ * acinclude.m4 (GLIBCXX_ENABLE_CLOCALE): Use GNU locale model for
+ glibc 2.3 and later, but not uClibc, without an execution test.
+ * configure: Regenerate.
+ * doc/xml/manual/configure.xml, doc/xml/manual/prerequisites.xml,
+ doc/xml/faq.xml: Update.
+
+=== modified file 'libstdc++-v3/acinclude.m4'
+Index: gcc-4.5/libstdc++-v3/acinclude.m4
+===================================================================
+--- gcc-4.5.orig/libstdc++-v3/acinclude.m4
++++ gcc-4.5/libstdc++-v3/acinclude.m4
+@@ -1740,41 +1740,11 @@ AC_DEFUN([GLIBCXX_ENABLE_CLOCALE], [
+ if test $enable_clocale_flag = gnu; then
+ AC_EGREP_CPP([_GLIBCXX_ok], [
+ #include <features.h>
+- #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 2)
++ #if (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 3)) && !defined(__UCLIBC__)
+ _GLIBCXX_ok
+ #endif
+ ], enable_clocale_flag=gnu, enable_clocale_flag=generic)
+
+- if test $enable_clocale = auto; then
+- # Test for bugs early in glibc-2.2.x series
+- AC_TRY_RUN([
+- #define _GNU_SOURCE 1
+- #include <locale.h>
+- #include <string.h>
+- #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 2)
+- extern __typeof(newlocale) __newlocale;
+- extern __typeof(duplocale) __duplocale;
+- extern __typeof(strcoll_l) __strcoll_l;
+- #endif
+- int main()
+- {
+- const char __one[] = "Äuglein Augmen";
+- const char __two[] = "Äuglein";
+- int i;
+- int j;
+- __locale_t loc;
+- __locale_t loc_dup;
+- loc = __newlocale(1 << LC_ALL, "de_DE", 0);
+- loc_dup = __duplocale(loc);
+- i = __strcoll_l(__one, __two, loc);
+- j = __strcoll_l(__one, __two, loc_dup);
+- return 0;
+- }
+- ],
+- [enable_clocale_flag=gnu],[enable_clocale_flag=generic],
+- [enable_clocale_flag=generic])
+- fi
+-
+ # Set it to scream when it hurts.
+ ac_save_CFLAGS="$CFLAGS"
+ CFLAGS="-Wimplicit-function-declaration -Werror"
+Index: gcc-4.5/libstdc++-v3/configure
+===================================================================
+--- gcc-4.5.orig/libstdc++-v3/configure
++++ gcc-4.5/libstdc++-v3/configure
+@@ -15627,7 +15627,7 @@ fi
+ /* end confdefs.h. */
+
+ #include <features.h>
+- #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 2)
++ #if (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 3)) && !defined(__UCLIBC__)
+ _GLIBCXX_ok
+ #endif
+
+@@ -15641,49 +15641,6 @@ fi
+ rm -f conftest*
+
+
+- if test $enable_clocale = auto; then
+- # Test for bugs early in glibc-2.2.x series
+- if test "$cross_compiling" = yes; then :
+- enable_clocale_flag=generic
+-else
+- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+-/* end confdefs.h. */
+-
+- #define _GNU_SOURCE 1
+- #include <locale.h>
+- #include <string.h>
+- #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 2)
+- extern __typeof(newlocale) __newlocale;
+- extern __typeof(duplocale) __duplocale;
+- extern __typeof(strcoll_l) __strcoll_l;
+- #endif
+- int main()
+- {
+- const char __one[] = "Äuglein Augmen";
+- const char __two[] = "Äuglein";
+- int i;
+- int j;
+- __locale_t loc;
+- __locale_t loc_dup;
+- loc = __newlocale(1 << LC_ALL, "de_DE", 0);
+- loc_dup = __duplocale(loc);
+- i = __strcoll_l(__one, __two, loc);
+- j = __strcoll_l(__one, __two, loc_dup);
+- return 0;
+- }
+-
+-_ACEOF
+-if ac_fn_c_try_run "$LINENO"; then :
+- enable_clocale_flag=gnu
+-else
+- enable_clocale_flag=generic
+-fi
+-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+- conftest.$ac_objext conftest.beam conftest.$ac_ext
+-fi
+-
+- fi
+-
+ # Set it to scream when it hurts.
+ ac_save_CFLAGS="$CFLAGS"
+ CFLAGS="-Wimplicit-function-declaration -Werror"
+Index: gcc-4.5/libstdc++-v3/doc/xml/faq.xml
+===================================================================
+--- gcc-4.5.orig/libstdc++-v3/doc/xml/faq.xml
++++ gcc-4.5/libstdc++-v3/doc/xml/faq.xml
+@@ -636,6 +636,8 @@
+ C library (glibc) version 2.2.5. That version of glibc is over a
+ year old and contains necessary bugfixes. Many GNU/Linux distros make
+ glibc version 2.3.x available now.
++ libstdc++ 4.6.0 and later require glibc 2.3 or later for this
++ localization and formatting code.
+ </para>
+ <para>The guideline is simple: the more recent the C++ library, the
+ more recent the C library. (This is also documented in the main
+Index: gcc-4.5/libstdc++-v3/doc/xml/manual/configure.xml
+===================================================================
+--- gcc-4.5.orig/libstdc++-v3/doc/xml/manual/configure.xml
++++ gcc-4.5/libstdc++-v3/doc/xml/manual/configure.xml
+@@ -113,8 +113,7 @@
+ <para>If not explicitly specified, the configure process tries
+ to guess the most suitable package from the choices above. The
+ default is 'generic'. On glibc-based systems of sufficient
+- vintage (2.2.5 and newer) and capability (with installed DE and
+- FR locale data), 'gnu' is automatically selected. This option
++ vintage (2.3 and newer), 'gnu' is automatically selected. This option
+ can change the library ABI.
+ </para>
+ </listitem></varlistentry>
+Index: gcc-4.5/libstdc++-v3/doc/xml/manual/prerequisites.xml
+===================================================================
+--- gcc-4.5.orig/libstdc++-v3/doc/xml/manual/prerequisites.xml
++++ gcc-4.5/libstdc++-v3/doc/xml/manual/prerequisites.xml
+@@ -52,16 +52,8 @@
+ <para>
+ If gcc 3.1.0 or later on is being used on linux, an attempt
+ will be made to use "C" library functionality necessary for
+- C++ named locale support. For gcc 3.2.1 and later, this
+- means that glibc 2.2.5 or later is required and the "C"
+- library de_DE locale information must be installed.
+- </para>
+-
+- <para>
+- Note however that the sanity checks involving the de_DE
+- locale are skipped when an explicit --enable-clocale=gnu
+- configure option is used: only the basic checks are carried
+- out, defending against misconfigurations.
++ C++ named locale support. For gcc 4.6.0 and later, this
++ means that glibc 2.3 or later is required.
+ </para>
+
+ <para>
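
A note on what r99403 buys us (illustration, not part of the patch): the
old AC_TRY_RUN probe had to execute a locale test program, which cannot
work when cross-compiling, so cross builds fell back to the 'generic'
locale model. The replacement is a pure compile-time check against the
target's <features.h>, roughly equivalent to the sketch below.

    /* Sketch of the compile-time probe (mirrors the AC_EGREP_CPP test
       in the hunk above): if preprocessing this against the target C
       library's <features.h> leaves the _GLIBCXX_ok token in the
       output, glibc is at least 2.3 and not uClibc, and the 'gnu'
       locale model is selected without running any test program.  */
    #include <features.h>
    #if (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 3)) \
        && !defined(__UCLIBC__)
    _GLIBCXX_ok
    #endif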
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99404.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99404.patch
new file mode 100644
index 0000000000..2753300925
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99404.patch
@@ -0,0 +1,386 @@
+2010-09-20 Jie Zhang <jie@codesourcery.com>
+
+ Issue #9019
+
+ Backport from mainline:
+
+ gcc/
+ 2010-09-20 Jie Zhang <jie@codesourcery.com>
+ * config/arm/arm.c (arm_address_offset_is_imm): New.
+ (arm_early_store_addr_dep): New.
+ (arm_early_load_addr_dep): New.
+ * config/arm/arm-protos.h (arm_early_store_addr_dep): Declare.
+ (arm_early_load_addr_dep): Declare.
+ (arm_address_offset_is_imm): Declare.
+ * config/arm/cortex-m4.md: New file.
+ * config/arm/cortex-m4-fpu.md: New file.
+ * config/arm/arm.md: Include cortex-m4.md and cortex-m4-fpu.md.
+ (attr generic_sched): Exclude cortexm4.
+ (attr generic_vfp): Exclude cortexm4.
+
+=== modified file 'gcc/config/arm/arm-protos.h'
+Index: gcc-4.5/gcc/config/arm/arm-protos.h
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm-protos.h
++++ gcc-4.5/gcc/config/arm/arm-protos.h
+@@ -87,6 +87,8 @@ extern int arm_coproc_mem_operand (rtx,
+ extern int neon_vector_mem_operand (rtx, int);
+ extern int neon_struct_mem_operand (rtx);
+ extern int arm_no_early_store_addr_dep (rtx, rtx);
++extern int arm_early_store_addr_dep (rtx, rtx);
++extern int arm_early_load_addr_dep (rtx, rtx);
+ extern int arm_no_early_alu_shift_dep (rtx, rtx);
+ extern int arm_no_early_alu_shift_value_dep (rtx, rtx);
+ extern int arm_no_early_mul_dep (rtx, rtx);
+@@ -131,6 +133,7 @@ extern const char *output_move_quad (rtx
+ extern const char *output_move_vfp (rtx *operands);
+ extern const char *output_move_neon (rtx *operands);
+ extern int arm_attr_length_move_neon (rtx);
++extern int arm_address_offset_is_imm (rtx);
+ extern const char *output_add_immediate (rtx *);
+ extern const char *arithmetic_instr (rtx, int);
+ extern void output_ascii_pseudo_op (FILE *, const unsigned char *, int);
+Index: gcc-4.5/gcc/config/arm/arm.c
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm.c
++++ gcc-4.5/gcc/config/arm/arm.c
+@@ -13542,6 +13542,34 @@ arm_attr_length_move_neon (rtx insn)
+ return 4;
+ }
+
++/* Return nonzero if the offset in the address is an immediate. Otherwise,
++ return zero. */
++
++int
++arm_address_offset_is_imm (rtx insn)
++{
++ rtx mem, addr;
++
++ extract_insn_cached (insn);
++
++ if (REG_P (recog_data.operand[0]))
++ return 0;
++
++ mem = recog_data.operand[0];
++
++ gcc_assert (MEM_P (mem));
++
++ addr = XEXP (mem, 0);
++
++ if (GET_CODE (addr) == REG
++ || (GET_CODE (addr) == PLUS
++ && GET_CODE (XEXP (addr, 0)) == REG
++ && GET_CODE (XEXP (addr, 1)) == CONST_INT))
++ return 1;
++ else
++ return 0;
++}
++
+ /* Output an ADD r, s, #n where n may be too big for one instruction.
+ If adding zero to one register, output nothing. */
+ const char *
+@@ -21620,6 +21648,38 @@ arm_no_early_store_addr_dep (rtx produce
+ return !reg_overlap_mentioned_p (value, addr);
+ }
+
++/* Return nonzero if the CONSUMER instruction (a store) does need
++ PRODUCER's value to calculate the address. */
++
++int
++arm_early_store_addr_dep (rtx producer, rtx consumer)
++{
++ return !arm_no_early_store_addr_dep (producer, consumer);
++}
++
++/* Return nonzero if the CONSUMER instruction (a load) does need
++ PRODUCER's value to calculate the address. */
++
++int
++arm_early_load_addr_dep (rtx producer, rtx consumer)
++{
++ rtx value = PATTERN (producer);
++ rtx addr = PATTERN (consumer);
++
++ if (GET_CODE (value) == COND_EXEC)
++ value = COND_EXEC_CODE (value);
++ if (GET_CODE (value) == PARALLEL)
++ value = XVECEXP (value, 0, 0);
++ value = XEXP (value, 0);
++ if (GET_CODE (addr) == COND_EXEC)
++ addr = COND_EXEC_CODE (addr);
++ if (GET_CODE (addr) == PARALLEL)
++ addr = XVECEXP (addr, 0, 0);
++ addr = XEXP (addr, 1);
++
++ return reg_overlap_mentioned_p (value, addr);
++}
++
+ /* Return nonzero if the CONSUMER instruction (an ALU op) does not
+ have an early register shift value or amount dependency on the
+ result of PRODUCER. */
+Index: gcc-4.5/gcc/config/arm/arm.md
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm.md
++++ gcc-4.5/gcc/config/arm/arm.md
+@@ -434,16 +434,16 @@
+ ;; True if the generic scheduling description should be used.
+
+ (define_attr "generic_sched" "yes,no"
+- (const (if_then_else
+- (ior (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa5,cortexa8,cortexa9")
+- (eq_attr "tune_cortexr4" "yes"))
++ (const (if_then_else
++ (ior (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa5,cortexa8,cortexa9,cortexm4")
++ (eq_attr "tune_cortexr4" "yes"))
+ (const_string "no")
+ (const_string "yes"))))
+
+ (define_attr "generic_vfp" "yes,no"
+ (const (if_then_else
+ (and (eq_attr "fpu" "vfp")
+- (eq_attr "tune" "!arm1020e,arm1022e,cortexa5,cortexa8,cortexa9")
++ (eq_attr "tune" "!arm1020e,arm1022e,cortexa5,cortexa8,cortexa9,cortexm4")
+ (eq_attr "tune_cortexr4" "no"))
+ (const_string "yes")
+ (const_string "no"))))
+@@ -472,6 +472,8 @@
+ (include "cortex-a9.md")
+ (include "cortex-r4.md")
+ (include "cortex-r4f.md")
++(include "cortex-m4.md")
++(include "cortex-m4-fpu.md")
+ (include "vfp11.md")
+
+
+Index: gcc-4.5/gcc/config/arm/cortex-m4-fpu.md
+===================================================================
+--- /dev/null
++++ gcc-4.5/gcc/config/arm/cortex-m4-fpu.md
+@@ -0,0 +1,111 @@
++;; ARM Cortex-M4 FPU pipeline description
++;; Copyright (C) 2010 Free Software Foundation, Inc.
++;; Contributed by CodeSourcery.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful, but
++;; WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++;; General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++;; Use an artificial unit to model the FPU.
++(define_cpu_unit "cortex_m4_v" "cortex_m4")
++
++(define_reservation "cortex_m4_ex_v" "cortex_m4_ex+cortex_m4_v")
++
++;; Integer instructions following VDIV or VSQRT complete out-of-order.
++(define_insn_reservation "cortex_m4_fdivs" 15
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "fdivs"))
++ "cortex_m4_ex_v,cortex_m4_v*13")
++
++(define_insn_reservation "cortex_m4_vmov_1" 1
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "fcpys,fconsts"))
++ "cortex_m4_ex_v")
++
++(define_insn_reservation "cortex_m4_vmov_2" 2
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "f_2_r,r_2_f"))
++ "cortex_m4_ex_v*2")
++
++(define_insn_reservation "cortex_m4_fmuls" 2
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "fmuls"))
++ "cortex_m4_ex_v")
++
++(define_insn_reservation "cortex_m4_fmacs" 4
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "fmacs"))
++ "cortex_m4_ex_v*3")
++
++(define_insn_reservation "cortex_m4_ffariths" 1
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "ffariths"))
++ "cortex_m4_ex_v")
++
++(define_insn_reservation "cortex_m4_fadds" 2
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "fadds"))
++ "cortex_m4_ex_v")
++
++(define_insn_reservation "cortex_m4_fcmps" 1
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "fcmps"))
++ "cortex_m4_ex_v")
++
++(define_insn_reservation "cortex_m4_f_flag" 1
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "f_flag"))
++ "cortex_m4_ex_v")
++
++(define_insn_reservation "cortex_m4_f_cvt" 2
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "f_cvt"))
++ "cortex_m4_ex_v")
++
++(define_insn_reservation "cortex_m4_f_load" 2
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "f_load"))
++ "cortex_m4_ex_v*2")
++
++(define_insn_reservation "cortex_m4_f_store" 2
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "f_store"))
++ "cortex_m4_ex_v*2")
++
++(define_insn_reservation "cortex_m4_f_loadd" 3
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "f_loadd"))
++ "cortex_m4_ex_v*3")
++
++(define_insn_reservation "cortex_m4_f_stored" 3
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "f_stored"))
++ "cortex_m4_ex_v*3")
++
++;; MAC instructions consume their addend one cycle later. If the result
++;; of an arithmetic instruction is consumed as the addend of the following
++;; MAC instruction, the latency can be decreased by one.
++
++(define_bypass 1 "cortex_m4_fadds,cortex_m4_fmuls,cortex_m4_f_cvt"
++ "cortex_m4_fmacs"
++ "arm_no_early_mul_dep")
++
++(define_bypass 3 "cortex_m4_fmacs"
++ "cortex_m4_fmacs"
++ "arm_no_early_mul_dep")
++
++(define_bypass 14 "cortex_m4_fdivs"
++ "cortex_m4_fmacs"
++ "arm_no_early_mul_dep")
+Index: gcc-4.5/gcc/config/arm/cortex-m4.md
+===================================================================
+--- /dev/null
++++ gcc-4.5/gcc/config/arm/cortex-m4.md
+@@ -0,0 +1,111 @@
++;; ARM Cortex-M4 pipeline description
++;; Copyright (C) 2010 Free Software Foundation, Inc.
++;; Contributed by CodeSourcery.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful, but
++;; WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++;; General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_automaton "cortex_m4")
++
++;; We model the pipelining of LDR instructions by using two artificial units.
++
++(define_cpu_unit "cortex_m4_a" "cortex_m4")
++
++(define_cpu_unit "cortex_m4_b" "cortex_m4")
++
++(define_reservation "cortex_m4_ex" "cortex_m4_a+cortex_m4_b")
++
++;; ALU and multiply is one cycle.
++(define_insn_reservation "cortex_m4_alu" 1
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "alu,alu_shift,alu_shift_reg,mult"))
++ "cortex_m4_ex")
++
++;; Byte, half-word and word load is two cycles.
++(define_insn_reservation "cortex_m4_load1" 2
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "load_byte,load1"))
++ "cortex_m4_a, cortex_m4_b")
++
++;; str rx, [ry, #imm] is always one cycle.
++(define_insn_reservation "cortex_m4_store1_1" 1
++ (and (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "store1"))
++ (ne (symbol_ref ("arm_address_offset_is_imm (insn)")) (const_int 0)))
++ "cortex_m4_a")
++
++;; Other byte, half-word and word store is two cycles.
++(define_insn_reservation "cortex_m4_store1_2" 2
++ (and (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "store1"))
++ (eq (symbol_ref ("arm_address_offset_is_imm (insn)")) (const_int 0)))
++ "cortex_m4_a*2")
++
++(define_insn_reservation "cortex_m4_load2" 3
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "load2"))
++ "cortex_m4_ex*3")
++
++(define_insn_reservation "cortex_m4_store2" 3
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "store2"))
++ "cortex_m4_ex*3")
++
++(define_insn_reservation "cortex_m4_load3" 4
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "load3"))
++ "cortex_m4_ex*4")
++
++(define_insn_reservation "cortex_m4_store3" 4
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "store3"))
++ "cortex_m4_ex*4")
++
++(define_insn_reservation "cortex_m4_load4" 5
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "load4"))
++ "cortex_m4_ex*5")
++
++(define_insn_reservation "cortex_m4_store4" 5
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "store4"))
++ "cortex_m4_ex*5")
++
++;; If the address of a load or store depends on the result of the preceding
++;; instruction, the latency is increased by one.
++
++(define_bypass 2 "cortex_m4_alu"
++ "cortex_m4_load1"
++ "arm_early_load_addr_dep")
++
++(define_bypass 2 "cortex_m4_alu"
++ "cortex_m4_store1_1,cortex_m4_store1_2"
++ "arm_early_store_addr_dep")
++
++(define_insn_reservation "cortex_m4_branch" 3
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "branch"))
++ "cortex_m4_ex*3")
++
++(define_insn_reservation "cortex_m4_call" 3
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "call"))
++ "cortex_m4_ex*3")
++
++(define_insn_reservation "cortex_m4_block" 1
++ (and (eq_attr "tune" "cortexm4")
++ (eq_attr "type" "block"))
++ "cortex_m4_ex")
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99405.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99405.patch
new file mode 100644
index 0000000000..7fc943f4bc
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99405.patch
@@ -0,0 +1,36 @@
+2010-09-22 Chung-Lin Tang <cltang@codesourcery.com>
+
+ Backport from mainline:
+
+ 2010-09-22 Chung-Lin Tang <cltang@codesourcery.com>
+
+ gcc/
+ * postreload.c (move2add_note_store): Add reg_symbol_ref[] checks
+ to update conditions. Fix reg_mode[] check.
+
+=== modified file 'gcc/postreload.c'
+Index: gcc-4.5/gcc/postreload.c
+===================================================================
+--- gcc-4.5.orig/gcc/postreload.c
++++ gcc-4.5/gcc/postreload.c
+@@ -2103,15 +2103,17 @@ move2add_note_store (rtx dst, const_rtx
+ && (MODES_OK_FOR_MOVE2ADD
+ (dst_mode, reg_mode[REGNO (XEXP (src, 1))])))
+ {
+- if (reg_base_reg[REGNO (XEXP (src, 1))] < 0)
++ if (reg_base_reg[REGNO (XEXP (src, 1))] < 0
++ && reg_symbol_ref[REGNO (XEXP (src, 1))] == NULL_RTX)
+ offset = reg_offset[REGNO (XEXP (src, 1))];
+ /* Maybe the first register is known to be a
+ constant. */
+ else if (reg_set_luid[REGNO (base_reg)]
+ > move2add_last_label_luid
+ && (MODES_OK_FOR_MOVE2ADD
+- (dst_mode, reg_mode[REGNO (XEXP (src, 1))]))
+- && reg_base_reg[REGNO (base_reg)] < 0)
++ (dst_mode, reg_mode[REGNO (base_reg)]))
++ && reg_base_reg[REGNO (base_reg)] < 0
++ && reg_symbol_ref[REGNO (base_reg)] == NULL_RTX)
+ {
+ offset = reg_offset[REGNO (base_reg)];
+ base_reg = XEXP (src, 1);
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99406.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99406.patch
new file mode 100644
index 0000000000..54473fa234
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99406.patch
@@ -0,0 +1,20 @@
+2010-09-28 Jie Zhang <jie@codesourcery.com>
+
+ Backport from mainline:
+
+ gcc/testsuite/
+ 2010-09-28 Jie Zhang <jie@codesourcery.com>
+ * gcc.dg/Wcxx-compat-12.c: Add -fno-short-enums.
+
+=== modified file 'gcc/testsuite/gcc.dg/Wcxx-compat-12.c'
+Index: gcc-4.5/gcc/testsuite/gcc.dg/Wcxx-compat-12.c
+===================================================================
+--- gcc-4.5.orig/gcc/testsuite/gcc.dg/Wcxx-compat-12.c
++++ gcc-4.5/gcc/testsuite/gcc.dg/Wcxx-compat-12.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-Wc++-compat" } */
++/* { dg-options "-fno-short-enums -Wc++-compat" } */
+
+ enum E { A };
+
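For context: ARM EABI toolchains are often configured with -fshort-enums as the default, which can shrink an enum below the size of int and perturb the -Wc++-compat diagnostics the test expects; forcing -fno-short-enums keeps the test stable across configurations. A hypothetical illustration of what the flag changes:

/* With -fshort-enums, sizeof (enum E2) may be 1; with -fno-short-enums
   it is sizeof (int).  */
enum E2 { B };

int
enum_size (void)
{
  return (int) sizeof (enum E2);
}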
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99407.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99407.patch
new file mode 100644
index 0000000000..80f4246ed2
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99407.patch
@@ -0,0 +1,33 @@
+2010-09-30 Jie Zhang <jie@codesourcery.com>
+
+ gcc/testsuite/
+
+ * c-c++-common/uninit-17.c: Adjust warning message.
+
+ Backport from mainline:
+
+ 2010-07-30 Xinliang David Li <davidxl@google.com>
+ PR tree-optimization/45121
+ * c-c++-common/uninit-17.c: Add -fno-ivops option.
+
+=== modified file 'gcc/testsuite/c-c++-common/uninit-17.c'
+Index: gcc-4.5/gcc/testsuite/c-c++-common/uninit-17.c
+===================================================================
+--- gcc-4.5.orig/gcc/testsuite/c-c++-common/uninit-17.c
++++ gcc-4.5/gcc/testsuite/c-c++-common/uninit-17.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -Wuninitialized" } */
++/* { dg-options "-O2 -Wuninitialized -fno-ivopts" } */
+
+ inline int foo(int x)
+ {
+@@ -9,7 +9,7 @@ static void bar(int a, int *ptr)
+ {
+ do
+ {
+- int b; /* { dg-warning "is used uninitialized" } */
++ int b; /* { dg-warning "may be used uninitialized" } */
+ if (b < 40) {
+ ptr[0] = b;
+ }
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99408.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99408.patch
new file mode 100644
index 0000000000..1d873ba653
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99408.patch
@@ -0,0 +1,603 @@
+2010-10-01 Julian Brown <julian@codesourcery.com>
+
+ Revert:
+
+ Backport from FSF:
+
+ 2010-08-07 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
+ gcc/
+ * config/arm/linux-atomic.c (SUBWORD_VAL_CAS): Instantiate with
+ 'unsigned short' and 'unsigned char' instead of 'short' and
+ 'char'. (SUBWORD_BOOL_CAS): Likewise.
+ (SUBWORD_SYNC_OP): Likewise.
+ (SUBWORD_TEST_AND_SET): Likewise.
+ (FETCH_AND_OP_WORD): Parenthesise INF_OP
+ (SUBWORD_SYNC_OP): Likewise.
+ (OP_AND_FETCH_WORD): Likewise.
+
+ gcc/testsuite/
+ * lib/target-supports.exp: (check_effective_target_sync_int_long):
+ Add arm*-*-linux-gnueabi.
+ (check_effective_target_sync_char_short): Likewise.
+
+=== modified file 'gcc/config/arm/arm-protos.h'
+Index: gcc-4.5/gcc/config/arm/arm-protos.h
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm-protos.h
++++ gcc-4.5/gcc/config/arm/arm-protos.h
+@@ -151,11 +151,6 @@ extern const char *vfp_output_fstmd (rtx
+ extern void arm_set_return_address (rtx, rtx);
+ extern int arm_eliminable_register (rtx);
+ extern const char *arm_output_shift(rtx *, int);
+-extern void arm_expand_sync (enum machine_mode, struct arm_sync_generator *,
+- rtx, rtx, rtx, rtx);
+-extern const char *arm_output_memory_barrier (rtx *);
+-extern const char *arm_output_sync_insn (rtx, rtx *);
+-extern unsigned int arm_sync_loop_insns (rtx , rtx *);
+
+ extern bool arm_output_addr_const_extra (FILE *, rtx);
+
+Index: gcc-4.5/gcc/config/arm/arm.c
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm.c
++++ gcc-4.5/gcc/config/arm/arm.c
+@@ -605,7 +605,6 @@ static int thumb_call_reg_needed;
+ #define FL_NEON (1 << 20) /* Neon instructions. */
+ #define FL_ARCH7EM (1 << 21) /* Instructions present in the ARMv7E-M
+ architecture. */
+-#define FL_ARCH7 (1 << 22) /* Architecture 7. */
+
+ #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
+
+@@ -626,7 +625,7 @@ static int thumb_call_reg_needed;
+ #define FL_FOR_ARCH6ZK FL_FOR_ARCH6K
+ #define FL_FOR_ARCH6T2 (FL_FOR_ARCH6 | FL_THUMB2)
+ #define FL_FOR_ARCH6M (FL_FOR_ARCH6 & ~FL_NOTM)
+-#define FL_FOR_ARCH7 ((FL_FOR_ARCH6T2 & ~FL_NOTM) | FL_ARCH7)
++#define FL_FOR_ARCH7 (FL_FOR_ARCH6T2 &~ FL_NOTM)
+ #define FL_FOR_ARCH7A (FL_FOR_ARCH7 | FL_NOTM | FL_ARCH6K)
+ #define FL_FOR_ARCH7R (FL_FOR_ARCH7A | FL_DIV)
+ #define FL_FOR_ARCH7M (FL_FOR_ARCH7 | FL_DIV)
+@@ -664,9 +663,6 @@ int arm_arch6 = 0;
+ /* Nonzero if this chip supports the ARM 6K extensions. */
+ int arm_arch6k = 0;
+
+-/* Nonzero if this chip supports the ARM 7 extensions. */
+-int arm_arch7 = 0;
+-
+ /* Nonzero if instructions not present in the 'M' profile can be used. */
+ int arm_arch_notm = 0;
+
+@@ -1638,7 +1634,6 @@ arm_override_options (void)
+ arm_arch6 = (insn_flags & FL_ARCH6) != 0;
+ arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
+ arm_arch_notm = (insn_flags & FL_NOTM) != 0;
+- arm_arch7 = (insn_flags & FL_ARCH7) != 0;
+ arm_arch7em = (insn_flags & FL_ARCH7EM) != 0;
+ arm_arch_thumb2 = (insn_flags & FL_THUMB2) != 0;
+ arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
+@@ -16595,17 +16590,6 @@ arm_print_operand (FILE *stream, rtx x,
+ }
+ return;
+
+- case 'C':
+- {
+- rtx addr;
+-
+- gcc_assert (GET_CODE (x) == MEM);
+- addr = XEXP (x, 0);
+- gcc_assert (GET_CODE (addr) == REG);
+- asm_fprintf (stream, "[%r]", REGNO (addr));
+- }
+- return;
+-
+ /* Translate an S register number into a D register number and element index. */
+ case 'y':
+ {
+@@ -22840,372 +22824,4 @@ arm_builtin_support_vector_misalignment
+ is_packed);
+ }
+
+-/* Legitimize a memory reference for sync primitive implemented using
+- ldrex / strex. We currently force the form of the reference to be
+- indirect without offset. We do not yet support the indirect offset
+- addressing supported by some ARM targets for these
+- instructions. */
+-static rtx
+-arm_legitimize_sync_memory (rtx memory)
+-{
+- rtx addr = force_reg (Pmode, XEXP (memory, 0));
+- rtx legitimate_memory = gen_rtx_MEM (GET_MODE (memory), addr);
+-
+- set_mem_alias_set (legitimate_memory, ALIAS_SET_MEMORY_BARRIER);
+- MEM_VOLATILE_P (legitimate_memory) = MEM_VOLATILE_P (memory);
+- return legitimate_memory;
+-}
+-
+-/* An instruction emitter. */
+-typedef void (* emit_f) (int label, const char *, rtx *);
+-
+-/* An instruction emitter that emits via the conventional
+- output_asm_insn. */
+-static void
+-arm_emit (int label ATTRIBUTE_UNUSED, const char *pattern, rtx *operands)
+-{
+- output_asm_insn (pattern, operands);
+-}
+-
+-/* Count the number of emitted synchronization instructions. */
+-static unsigned arm_insn_count;
+-
+-/* An emitter that counts emitted instructions but does not actually
+- emit instructions into the instruction stream. */
+-static void
+-arm_count (int label,
+- const char *pattern ATTRIBUTE_UNUSED,
+- rtx *operands ATTRIBUTE_UNUSED)
+-{
+- if (! label)
+- ++ arm_insn_count;
+-}
+-
+-/* Construct a pattern using conventional output formatting and feed
+- it to output_asm_insn. Provides a mechanism to construct the
+- output pattern on the fly. Note the hard limit on the pattern
+- buffer size. */
+-static void
+-arm_output_asm_insn (emit_f emit, int label, rtx *operands,
+- const char *pattern, ...)
+-{
+- va_list ap;
+- char buffer[256];
+-
+- va_start (ap, pattern);
+- vsprintf (buffer, pattern, ap);
+- va_end (ap);
+- emit (label, buffer, operands);
+-}
+-
+-/* Emit the memory barrier instruction, if any, provided by this
+- target to a specified emitter. */
+-static void
+-arm_process_output_memory_barrier (emit_f emit, rtx *operands)
+-{
+- if (TARGET_HAVE_DMB)
+- {
+- /* Note we issue a system level barrier. We should consider
+- issuing an inner shareability zone barrier here instead, i.e.
+- "DMB ISH". */
+- emit (0, "dmb\tsy", operands);
+- return;
+- }
+-
+- if (TARGET_HAVE_DMB_MCR)
+- {
+- emit (0, "mcr\tp15, 0, r0, c7, c10, 5", operands);
+- return;
+- }
+-
+- gcc_unreachable ();
+-}
+-
+-/* Emit the memory barrier instruction, if any, provided by this
+- target. */
+-const char *
+-arm_output_memory_barrier (rtx *operands)
+-{
+- arm_process_output_memory_barrier (arm_emit, operands);
+- return "";
+-}
+-
+-/* Helper to figure out the instruction suffix required on ldrex/strex
+- for operations on an object of the specified mode. */
+-static const char *
+-arm_ldrex_suffix (enum machine_mode mode)
+-{
+- switch (mode)
+- {
+- case QImode: return "b";
+- case HImode: return "h";
+- case SImode: return "";
+- case DImode: return "d";
+- default:
+- gcc_unreachable ();
+- }
+- return "";
+-}
+-
+-/* Emit an ldrex{b,h,d, } instruction appropriate for the specified
+- mode. */
+-static void
+-arm_output_ldrex (emit_f emit,
+- enum machine_mode mode,
+- rtx target,
+- rtx memory)
+-{
+- const char *suffix = arm_ldrex_suffix (mode);
+- rtx operands[2];
+-
+- operands[0] = target;
+- operands[1] = memory;
+- arm_output_asm_insn (emit, 0, operands, "ldrex%s\t%%0, %%C1", suffix);
+-}
+-
+-/* Emit a strex{b,h,d, } instruction appropriate for the specified
+- mode. */
+-static void
+-arm_output_strex (emit_f emit,
+- enum machine_mode mode,
+- const char *cc,
+- rtx result,
+- rtx value,
+- rtx memory)
+-{
+- const char *suffix = arm_ldrex_suffix (mode);
+- rtx operands[3];
+-
+- operands[0] = result;
+- operands[1] = value;
+- operands[2] = memory;
+- arm_output_asm_insn (emit, 0, operands, "strex%s%s\t%%0, %%1, %%C2", suffix,
+- cc);
+-}
+-
+-/* Helper to emit a two operand instruction. */
+-static void
+-arm_output_op2 (emit_f emit, const char *mnemonic, rtx d, rtx s)
+-{
+- rtx operands[2];
+-
+- operands[0] = d;
+- operands[1] = s;
+- arm_output_asm_insn (emit, 0, operands, "%s\t%%0, %%1", mnemonic);
+-}
+-
+-/* Helper to emit a three operand instruction. */
+-static void
+-arm_output_op3 (emit_f emit, const char *mnemonic, rtx d, rtx a, rtx b)
+-{
+- rtx operands[3];
+-
+- operands[0] = d;
+- operands[1] = a;
+- operands[2] = b;
+- arm_output_asm_insn (emit, 0, operands, "%s\t%%0, %%1, %%2", mnemonic);
+-}
+-
+-/* Emit a load store exclusive synchronization loop.
+-
+- do
+- old_value = [mem]
+- if old_value != required_value
+- break;
+- t1 = sync_op (old_value, new_value)
+- [mem] = t1, t2 = [0|1]
+- while ! t2
+-
+- Note:
+- t1 == t2 is not permitted
+- t1 == old_value is permitted
+-
+- required_value:
+-
+- RTX register or const_int representing the required old_value for
+- the modify to continue; if NULL, no comparison is performed. */
+-static void
+-arm_output_sync_loop (emit_f emit,
+- enum machine_mode mode,
+- rtx old_value,
+- rtx memory,
+- rtx required_value,
+- rtx new_value,
+- rtx t1,
+- rtx t2,
+- enum attr_sync_op sync_op,
+- int early_barrier_required)
+-{
+- rtx operands[1];
+-
+- gcc_assert (t1 != t2);
+-
+- if (early_barrier_required)
+- arm_process_output_memory_barrier (emit, NULL);
+-
+- arm_output_asm_insn (emit, 1, operands, "%sLSYT%%=:", LOCAL_LABEL_PREFIX);
+-
+- arm_output_ldrex (emit, mode, old_value, memory);
+-
+- if (required_value)
+- {
+- rtx operands[2];
+-
+- operands[0] = old_value;
+- operands[1] = required_value;
+- arm_output_asm_insn (emit, 0, operands, "cmp\t%%0, %%1");
+- arm_output_asm_insn (emit, 0, operands, "bne\t%sLSYB%%=", LOCAL_LABEL_PREFIX);
+- }
+-
+- switch (sync_op)
+- {
+- case SYNC_OP_ADD:
+- arm_output_op3 (emit, "add", t1, old_value, new_value);
+- break;
+-
+- case SYNC_OP_SUB:
+- arm_output_op3 (emit, "sub", t1, old_value, new_value);
+- break;
+-
+- case SYNC_OP_IOR:
+- arm_output_op3 (emit, "orr", t1, old_value, new_value);
+- break;
+-
+- case SYNC_OP_XOR:
+- arm_output_op3 (emit, "eor", t1, old_value, new_value);
+- break;
+-
+- case SYNC_OP_AND:
+- arm_output_op3 (emit,"and", t1, old_value, new_value);
+- break;
+-
+- case SYNC_OP_NAND:
+- arm_output_op3 (emit, "and", t1, old_value, new_value);
+- arm_output_op2 (emit, "mvn", t1, t1);
+- break;
+-
+- case SYNC_OP_NONE:
+- t1 = new_value;
+- break;
+- }
+-
+- arm_output_strex (emit, mode, "", t2, t1, memory);
+- operands[0] = t2;
+- arm_output_asm_insn (emit, 0, operands, "teq\t%%0, #0");
+- arm_output_asm_insn (emit, 0, operands, "bne\t%sLSYT%%=", LOCAL_LABEL_PREFIX);
+-
+- arm_process_output_memory_barrier (emit, NULL);
+- arm_output_asm_insn (emit, 1, operands, "%sLSYB%%=:", LOCAL_LABEL_PREFIX);
+-}
+-
+-static rtx
+-arm_get_sync_operand (rtx *operands, int index, rtx default_value)
+-{
+- if (index > 0)
+- default_value = operands[index - 1];
+-
+- return default_value;
+-}
+-
+-#define FETCH_SYNC_OPERAND(NAME, DEFAULT) \
+- arm_get_sync_operand (operands, (int) get_attr_sync_##NAME (insn), DEFAULT);
+-
+-/* Extract the operands for a synchronization instruction from the
+- instruction's attributes and emit the instruction. */
+-static void
+-arm_process_output_sync_insn (emit_f emit, rtx insn, rtx *operands)
+-{
+- rtx result, memory, required_value, new_value, t1, t2;
+- int early_barrier;
+- enum machine_mode mode;
+- enum attr_sync_op sync_op;
+-
+- result = FETCH_SYNC_OPERAND(result, 0);
+- memory = FETCH_SYNC_OPERAND(memory, 0);
+- required_value = FETCH_SYNC_OPERAND(required_value, 0);
+- new_value = FETCH_SYNC_OPERAND(new_value, 0);
+- t1 = FETCH_SYNC_OPERAND(t1, 0);
+- t2 = FETCH_SYNC_OPERAND(t2, 0);
+- early_barrier =
+- get_attr_sync_release_barrier (insn) == SYNC_RELEASE_BARRIER_YES;
+- sync_op = get_attr_sync_op (insn);
+- mode = GET_MODE (memory);
+-
+- arm_output_sync_loop (emit, mode, result, memory, required_value,
+- new_value, t1, t2, sync_op, early_barrier);
+-}
+-
+-/* Emit a synchronization instruction loop. */
+-const char *
+-arm_output_sync_insn (rtx insn, rtx *operands)
+-{
+- arm_process_output_sync_insn (arm_emit, insn, operands);
+- return "";
+-}
+-
+-/* Count the number of machine instructions that will be emitted for a
+- synchronization instruction. Note that the emitter used does not
+- emit instructions, it just counts them, being careful not
+- to count labels. */
+-unsigned int
+-arm_sync_loop_insns (rtx insn, rtx *operands)
+-{
+- arm_insn_count = 0;
+- arm_process_output_sync_insn (arm_count, insn, operands);
+- return arm_insn_count;
+-}
+-
+-/* Helper to call a target sync instruction generator, dealing with
+- the variation in operands required by the different generators. */
+-static rtx
+-arm_call_generator (struct arm_sync_generator *generator, rtx old_value,
+- rtx memory, rtx required_value, rtx new_value)
+-{
+- switch (generator->op)
+- {
+- case arm_sync_generator_omn:
+- gcc_assert (! required_value);
+- return generator->u.omn (old_value, memory, new_value);
+-
+- case arm_sync_generator_omrn:
+- gcc_assert (required_value);
+- return generator->u.omrn (old_value, memory, required_value, new_value);
+- }
+-
+- return NULL;
+-}
+-
+-/* Expand a synchronization loop. The synchronization loop is expanded
+- as an opaque block of instructions in order to ensure that we do
+- not subsequently get extraneous memory accesses inserted within the
+- critical region. The exclusive access property of ldrex/strex is
+- only guaranteed if there are no intervening memory accesses. */
+-void
+-arm_expand_sync (enum machine_mode mode,
+- struct arm_sync_generator *generator,
+- rtx target, rtx memory, rtx required_value, rtx new_value)
+-{
+- if (target == NULL)
+- target = gen_reg_rtx (mode);
+-
+- memory = arm_legitimize_sync_memory (memory);
+- if (mode != SImode)
+- {
+- rtx load_temp = gen_reg_rtx (SImode);
+-
+- if (required_value)
+- required_value = convert_modes (SImode, mode, required_value, true);
+-
+- new_value = convert_modes (SImode, mode, new_value, true);
+- emit_insn (arm_call_generator (generator, load_temp, memory,
+- required_value, new_value));
+- emit_move_insn (target, gen_lowpart (mode, load_temp));
+- }
+- else
+- {
+- emit_insn (arm_call_generator (generator, target, memory, required_value,
+- new_value));
+- }
+-}
+-
+ #include "gt-arm.h"
+Index: gcc-4.5/gcc/config/arm/arm.h
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm.h
++++ gcc-4.5/gcc/config/arm/arm.h
+@@ -128,24 +128,6 @@ enum target_cpus
+ /* The processor for which instructions should be scheduled. */
+ extern enum processor_type arm_tune;
+
+-enum arm_sync_generator_tag
+- {
+- arm_sync_generator_omn,
+- arm_sync_generator_omrn
+- };
+-
+-/* Wrapper to pass around a polymorphic pointer to a sync instruction
+- generator and. */
+-struct arm_sync_generator
+-{
+- enum arm_sync_generator_tag op;
+- union
+- {
+- rtx (* omn) (rtx, rtx, rtx);
+- rtx (* omrn) (rtx, rtx, rtx, rtx);
+- } u;
+-};
+-
+ typedef enum arm_cond_code
+ {
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+@@ -290,20 +272,6 @@ extern void (*arm_lang_output_object_att
+ for Thumb-2. */
+ #define TARGET_UNIFIED_ASM TARGET_THUMB2
+
+-/* Nonzero if this chip provides the DMB instruction. */
+-#define TARGET_HAVE_DMB (arm_arch7)
+-
+-/* Nonzero if this chip implements a memory barrier via CP15. */
+-#define TARGET_HAVE_DMB_MCR (arm_arch6k && ! TARGET_HAVE_DMB)
+-
+-/* Nonzero if this chip implements a memory barrier instruction. */
+-#define TARGET_HAVE_MEMORY_BARRIER (TARGET_HAVE_DMB || TARGET_HAVE_DMB_MCR)
+-
+-/* Nonzero if this chip supports ldrex and strex */
+-#define TARGET_HAVE_LDREX ((arm_arch6 && TARGET_ARM) || arm_arch7)
+-
+-/* Nonzero if this chip supports ldrex{bhd} and strex{bhd}. */
+-#define TARGET_HAVE_LDREXBHD ((arm_arch6k && TARGET_ARM) || arm_arch7)
+
+ /* True iff the full BPABI is being used. If TARGET_BPABI is true,
+ then TARGET_AAPCS_BASED must be true -- but the converse does not
+@@ -437,12 +405,6 @@ extern int arm_arch5e;
+ /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
+ extern int arm_arch6;
+
+-/* Nonzero if this chip supports the ARM Architecture 6k extensions. */
+-extern int arm_arch6k;
+-
+-/* Nonzero if this chip supports the ARM Architecture 7 extensions. */
+-extern int arm_arch7;
+-
+ /* Nonzero if instructions not present in the 'M' profile can be used. */
+ extern int arm_arch_notm;
+
+Index: gcc-4.5/gcc/config/arm/arm.md
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm.md
++++ gcc-4.5/gcc/config/arm/arm.md
+@@ -103,7 +103,6 @@
+ (UNSPEC_RBIT 26) ; rbit operation.
+ (UNSPEC_SYMBOL_OFFSET 27) ; The offset of the start of the symbol from
+ ; another symbolic address.
+- (UNSPEC_MEMORY_BARRIER 28) ; Represent a memory barrier.
+ ]
+ )
+
+@@ -140,11 +139,6 @@
+ (VUNSPEC_ALIGN32 16) ; Used to force 32-byte alignment.
+ (VUNSPEC_EH_RETURN 20); Use to override the return address for exception
+ ; handling.
+- (VUNSPEC_SYNC_COMPARE_AND_SWAP 21) ; Represent an atomic compare swap.
+- (VUNSPEC_SYNC_LOCK 22) ; Represent a sync_lock_test_and_set.
+- (VUNSPEC_SYNC_OP 23) ; Represent a sync_<op>
+- (VUNSPEC_SYNC_NEW_OP 24) ; Represent a sync_new_<op>
+- (VUNSPEC_SYNC_OLD_OP 25) ; Represent a sync_old_<op>
+ ]
+ )
+
+@@ -169,21 +163,8 @@
+ (define_attr "fpu" "none,fpa,fpe2,fpe3,maverick,vfp"
+ (const (symbol_ref "arm_fpu_attr")))
+
+-(define_attr "sync_result" "none,0,1,2,3,4,5" (const_string "none"))
+-(define_attr "sync_memory" "none,0,1,2,3,4,5" (const_string "none"))
+-(define_attr "sync_required_value" "none,0,1,2,3,4,5" (const_string "none"))
+-(define_attr "sync_new_value" "none,0,1,2,3,4,5" (const_string "none"))
+-(define_attr "sync_t1" "none,0,1,2,3,4,5" (const_string "none"))
+-(define_attr "sync_t2" "none,0,1,2,3,4,5" (const_string "none"))
+-(define_attr "sync_release_barrier" "yes,no" (const_string "yes"))
+-(define_attr "sync_op" "none,add,sub,ior,xor,and,nand"
+- (const_string "none"))
+-
+ ; LENGTH of an instruction (in bytes)
+-(define_attr "length" ""
+- (cond [(not (eq_attr "sync_memory" "none"))
+- (symbol_ref "arm_sync_loop_insns (insn, operands) * 4")
+- ] (const_int 4)))
++(define_attr "length" "" (const_int 4))
+
+ ; POOL_RANGE is how far away from a constant pool entry that this insn
+ ; can be placed. If the distance is zero, then this insn will never
+@@ -11568,5 +11549,4 @@
+ (include "thumb2.md")
+ ;; Neon patterns
+ (include "neon.md")
+-;; Synchronization Primitives
+-(include "sync.md")
++
+Index: gcc-4.5/gcc/config/arm/predicates.md
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/predicates.md
++++ gcc-4.5/gcc/config/arm/predicates.md
+@@ -573,11 +573,6 @@
+ (and (match_test "TARGET_32BIT")
+ (match_operand 0 "arm_di_operand"))))
+
+-;; True if the operand is memory reference suitable for a ldrex/strex.
+-(define_predicate "arm_sync_memory_operand"
+- (and (match_operand 0 "memory_operand")
+- (match_code "reg" "0")))
+-
+ ;; Predicates for parallel expanders based on mode.
+ (define_special_predicate "vect_par_constant_high"
+ (match_code "parallel")
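The machinery removed above expanded GCC's __sync atomic builtins inline as ldrex/strex retry loops framed by memory barriers (see arm_output_sync_loop in the deleted arm.c code); with the revert, those builtins fall back to their previous expansion, e.g. the helper-based routines in linux-atomic.c on GNU/Linux targets. A minimal sketch of the user-facing builtins involved (standard GCC builtins; compile flags hypothetical, e.g. -O2 -march=armv7-a):

int
fetch_add (int *p, int v)
{
  return __sync_fetch_and_add (p, v);
}

int
compare_swap (int *p, int old_val, int new_val)
{
  return __sync_val_compare_and_swap (p, old_val, new_val);
}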
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99409.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99409.patch
new file mode 100644
index 0000000000..39c3ab0810
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99409.patch
@@ -0,0 +1,18 @@
+2010-09-30 Jie Zhang <jie@codesourcery.com>
+
+ gcc/testsuite/
+ * gcc.target/arm/neon-thumb2-move.c: Add
+ dg-require-effective-target arm_thumb2_ok.
+
+=== modified file 'gcc/testsuite/gcc.target/arm/neon-thumb2-move.c'
+Index: gcc-4.5/gcc/testsuite/gcc.target/arm/neon-thumb2-move.c
+===================================================================
+--- gcc-4.5.orig/gcc/testsuite/gcc.target/arm/neon-thumb2-move.c
++++ gcc-4.5/gcc/testsuite/gcc.target/arm/neon-thumb2-move.c
+@@ -1,5 +1,6 @@
+ /* { dg-do compile } */
+ /* { dg-require-effective-target arm_neon_ok } */
++/* { dg-require-effective-target arm_thumb2_ok } */
+ /* { dg-options "-O2 -mthumb -march=armv7-a" } */
+ /* { dg-add-options arm_neon } */
+
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99410.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99410.patch
new file mode 100644
index 0000000000..f2a1c95621
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99410.patch
@@ -0,0 +1,32 @@
+2010-10-06 Julian Brown <julian@codesourcery.com>
+
+ gcc/testsuite/
+ * gcc.dg/Warray-bounds-3.c: Add -fno-unroll-loops for ARM.
+ * gcc.dg/vect/vect.exp: Likewise, for all vect tests.
+
+
+=== modified file 'gcc/testsuite/gcc.dg/Warray-bounds-3.c'
+Index: gcc-4.5/gcc/testsuite/gcc.dg/Warray-bounds-3.c
+===================================================================
+--- gcc-4.5.orig/gcc/testsuite/gcc.dg/Warray-bounds-3.c
++++ gcc-4.5/gcc/testsuite/gcc.dg/Warray-bounds-3.c
+@@ -1,5 +1,7 @@
+ /* { dg-do compile } */
+ /* { dg-options "-O2 -Warray-bounds" } */
++/* { dg-options "-O2 -Warray-bounds -fno-unroll-loops" { target arm*-*-* } } */
++
+ /* based on PR 31227 */
+
+ typedef __SIZE_TYPE__ size_t;
+Index: gcc-4.5/gcc/testsuite/gcc.dg/vect/vect.exp
+===================================================================
+--- gcc-4.5.orig/gcc/testsuite/gcc.dg/vect/vect.exp
++++ gcc-4.5/gcc/testsuite/gcc.dg/vect/vect.exp
+@@ -109,6 +109,7 @@ if [istarget "powerpc-*paired*"] {
+ # default to avoid loss of precision. We must pass -ffast-math to test
+ # vectorization of float operations.
+ lappend DEFAULT_VECTCFLAGS "-ffast-math"
++ lappend DEFAULT_VECTCFLAGS "-fno-unroll-loops"
+ if [is-effective-target arm_neon_hw] {
+ set dg-do-what-default run
+ } else {
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99411.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99411.patch
new file mode 100644
index 0000000000..c9a9316861
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99411.patch
@@ -0,0 +1,21 @@
+2010-10-08 Jie Zhang <jie@codesourcery.com>
+
+ * config/arm/arm.c (arm_override_options): Disable
+ -fsched-interblock for Cortex-M4.
+
+=== modified file 'gcc/config/arm/arm.c'
+Index: gcc-4.5/gcc/config/arm/arm.c
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm.c
++++ gcc-4.5/gcc/config/arm/arm.c
+@@ -1913,6 +1913,10 @@ arm_override_options (void)
+ fix_cm3_ldrd = 0;
+ }
+
++ /* Disable -fsched-interblock for Cortex-M4. */
++ if (arm_selected_tune->core == cortexm4)
++ flag_schedule_interblock = 0;
++
+ if (TARGET_THUMB1 && flag_schedule_insns)
+ {
+ /* Don't warn since it's on by default in -O2. */
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99412.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99412.patch
new file mode 100644
index 0000000000..c0aabbeb56
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99412.patch
@@ -0,0 +1,316 @@
+2010-10-09 Jie Zhang <jie@codesourcery.com>
+
+ Backport from mainline:
+
+ gcc/
+ 2010-06-03 Paul Brook <paul@codesourcery.com>
+ * config/arm/arm.c (FL_TUNE): Define.
+ (arm_default_cpu, arm_cpu_select): Remove.
+ (all_cores): Populate core field.
+ (arm_selected_arch, arm_selected_cpu, arm_selected_tune): New.
+ (arm_find_cpu): New function.
+ (arm_handle_option): Lookup cpu/architecture names.
+ (arm_override_options): Cleanup mcpu/march/mtune handling.
+ (arm_file_start): Ditto.
+
+=== modified file 'gcc/config/arm/arm.c'
+Index: gcc-4.5/gcc/config/arm/arm.c
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm.c
++++ gcc-4.5/gcc/config/arm/arm.c
+@@ -550,9 +550,6 @@ enum processor_type arm_tune = arm_none;
+ /* The current tuning set. */
+ const struct tune_params *current_tune;
+
+-/* The default processor used if not overridden by commandline. */
+-static enum processor_type arm_default_cpu = arm_none;
+-
+ /* Which floating point hardware to schedule for. */
+ int arm_fpu_attr;
+
+@@ -608,6 +605,10 @@ static int thumb_call_reg_needed;
+
+ #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
+
++/* Flags that only affect tuning, not available instructions. */
++#define FL_TUNE (FL_WBUF | FL_VFPV2 | FL_STRONG | FL_LDSCHED \
++ | FL_CO_PROC)
++
+ #define FL_FOR_ARCH2 FL_NOTM
+ #define FL_FOR_ARCH3 (FL_FOR_ARCH2 | FL_MODE32)
+ #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
+@@ -808,7 +809,7 @@ static const struct processors all_cores
+ {
+ /* ARM Cores */
+ #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+- {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, &arm_##COSTS##_tune},
++ {NAME, IDENT, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, &arm_##COSTS##_tune},
+ #include "arm-cores.def"
+ #undef ARM_CORE
+ {NULL, arm_none, NULL, 0, NULL}
+@@ -850,29 +851,12 @@ static const struct processors all_archi
+ {NULL, arm_none, NULL, 0 , NULL}
+ };
+
+-struct arm_cpu_select
+-{
+- const char * string;
+- const char * name;
+- const struct processors * processors;
+-};
+-
+-/* This is a magic structure. The 'string' field is magically filled in
+- with a pointer to the value specified by the user on the command line
+- assuming that the user has specified such a value. */
+-
+-static struct arm_cpu_select arm_select[] =
+-{
+- /* string name processors */
+- { NULL, "-mcpu=", all_cores },
+- { NULL, "-march=", all_architectures },
+- { NULL, "-mtune=", all_cores }
+-};
+
+-/* Defines representing the indexes into the above table. */
+-#define ARM_OPT_SET_CPU 0
+-#define ARM_OPT_SET_ARCH 1
+-#define ARM_OPT_SET_TUNE 2
++/* These are populated as commandline arguments are processed, or NULL
++ if not specified. */
++static const struct processors *arm_selected_arch;
++static const struct processors *arm_selected_cpu;
++static const struct processors *arm_selected_tune;
+
+ /* The name of the preprocessor macro to define for this architecture. */
+
+@@ -1234,6 +1218,24 @@ arm_gimplify_va_arg_expr (tree valist, t
+ return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
+ }
+
++/* Lookup NAME in SEL. */
++
++static const struct processors *
++arm_find_cpu (const char *name, const struct processors *sel, const char *desc)
++{
++ if (!(name && *name))
++ return NULL;
++
++ for (; sel->name != NULL; sel++)
++ {
++ if (streq (name, sel->name))
++ return sel;
++ }
++
++ error ("bad value (%s) for %s switch", name, desc);
++ return NULL;
++}
++
+ /* Implement TARGET_HANDLE_OPTION. */
+
+ static bool
+@@ -1242,11 +1244,11 @@ arm_handle_option (size_t code, const ch
+ switch (code)
+ {
+ case OPT_march_:
+- arm_select[1].string = arg;
++ arm_selected_arch = arm_find_cpu (arg, all_architectures, "-march");
+ return true;
+
+ case OPT_mcpu_:
+- arm_select[0].string = arg;
++ arm_selected_cpu = arm_find_cpu (arg, all_cores, "-mcpu");
+ return true;
+
+ case OPT_mhard_float:
+@@ -1258,7 +1260,7 @@ arm_handle_option (size_t code, const ch
+ return true;
+
+ case OPT_mtune_:
+- arm_select[2].string = arg;
++ arm_selected_tune = arm_find_cpu (arg, all_cores, "-mtune");
+ return true;
+
+ default:
+@@ -1358,88 +1360,52 @@ void
+ arm_override_options (void)
+ {
+ unsigned i;
+- enum processor_type target_arch_cpu = arm_none;
+- enum processor_type selected_cpu = arm_none;
+
+- /* Set up the flags based on the cpu/architecture selected by the user. */
+- for (i = ARRAY_SIZE (arm_select); i--;)
++ if (arm_selected_arch)
+ {
+- struct arm_cpu_select * ptr = arm_select + i;
+-
+- if (ptr->string != NULL && ptr->string[0] != '\0')
+- {
+- const struct processors * sel;
+-
+- for (sel = ptr->processors; sel->name != NULL; sel++)
+- if (streq (ptr->string, sel->name))
+- {
+- /* Set the architecture define. */
+- if (i != ARM_OPT_SET_TUNE)
+- sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
+-
+- /* Determine the processor core for which we should
+- tune code-generation. */
+- if (/* -mcpu= is a sensible default. */
+- i == ARM_OPT_SET_CPU
+- /* -mtune= overrides -mcpu= and -march=. */
+- || i == ARM_OPT_SET_TUNE)
+- arm_tune = (enum processor_type) (sel - ptr->processors);
+-
+- /* Remember the CPU associated with this architecture.
+- If no other option is used to set the CPU type,
+- we'll use this to guess the most suitable tuning
+- options. */
+- if (i == ARM_OPT_SET_ARCH)
+- target_arch_cpu = sel->core;
+-
+- if (i == ARM_OPT_SET_CPU)
+- selected_cpu = (enum processor_type) (sel - ptr->processors);
+-
+- if (i != ARM_OPT_SET_TUNE)
+- {
+- /* If we have been given an architecture and a processor
+- make sure that they are compatible. We only generate
+- a warning though, and we prefer the CPU over the
+- architecture. */
+- if (insn_flags != 0 && (insn_flags ^ sel->flags))
+- warning (0, "switch -mcpu=%s conflicts with -march= switch",
+- ptr->string);
+-
+- insn_flags = sel->flags;
+- }
+-
+- break;
+- }
++ if (arm_selected_cpu)
++ {
++ /* Check for conflict between -mcpu and -march. */
++ if ((arm_selected_cpu->flags ^ arm_selected_arch->flags) & ~FL_TUNE)
++ {
++ warning (0, "switch -mcpu=%s conflicts with -march=%s switch",
++ arm_selected_cpu->name, arm_selected_arch->name);
++ /* -march wins for code generation.
++ -mcpu wins for default tuning. */
++ if (!arm_selected_tune)
++ arm_selected_tune = arm_selected_cpu;
+
+- if (sel->name == NULL)
+- error ("bad value (%s) for %s switch", ptr->string, ptr->name);
+- }
++ arm_selected_cpu = arm_selected_arch;
++ }
++ else
++ /* -mcpu wins. */
++ arm_selected_arch = NULL;
++ }
++ else
++ /* Pick a CPU based on the architecture. */
++ arm_selected_cpu = arm_selected_arch;
+ }
+
+- /* Guess the tuning options from the architecture if necessary. */
+- if (arm_tune == arm_none)
+- arm_tune = target_arch_cpu;
+-
+ /* If the user did not specify a processor, choose one for them. */
+- if (insn_flags == 0)
++ if (!arm_selected_cpu)
+ {
+ const struct processors * sel;
+ unsigned int sought;
+
+- selected_cpu = (enum processor_type) TARGET_CPU_DEFAULT;
+- if (selected_cpu == arm_none)
++ arm_selected_cpu = &all_cores[TARGET_CPU_DEFAULT];
++ if (!arm_selected_cpu->name)
+ {
+ #ifdef SUBTARGET_CPU_DEFAULT
+ /* Use the subtarget default CPU if none was specified by
+ configure. */
+- selected_cpu = (enum processor_type) SUBTARGET_CPU_DEFAULT;
++ arm_selected_cpu = &all_cores[SUBTARGET_CPU_DEFAULT];
+ #endif
+ /* Default to ARM6. */
+- if (selected_cpu == arm_none)
+- selected_cpu = arm6;
++ if (!arm_selected_cpu->name)
++ arm_selected_cpu = &all_cores[arm6];
+ }
+- sel = &all_cores[selected_cpu];
+
++ sel = arm_selected_cpu;
+ insn_flags = sel->flags;
+
+ /* Now check to see if the user has specified some command line
+@@ -1500,17 +1466,21 @@ arm_override_options (void)
+ sel = best_fit;
+ }
+
+- insn_flags = sel->flags;
++ arm_selected_cpu = sel;
+ }
+- sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
+- arm_default_cpu = (enum processor_type) (sel - all_cores);
+- if (arm_tune == arm_none)
+- arm_tune = arm_default_cpu;
+ }
+
+- /* The processor for which we should tune should now have been
+- chosen. */
+- gcc_assert (arm_tune != arm_none);
++ gcc_assert (arm_selected_cpu);
++ /* The selected cpu may be an architecture, so lookup tuning by core ID. */
++ if (!arm_selected_tune)
++ arm_selected_tune = &all_cores[arm_selected_cpu->core];
++
++ sprintf (arm_arch_name, "__ARM_ARCH_%s__", arm_selected_cpu->arch);
++ insn_flags = arm_selected_cpu->flags;
++
++ arm_tune = arm_selected_tune->core;
++ tune_flags = arm_selected_tune->flags;
++ current_tune = arm_selected_tune->tune;
+
+ if (arm_tune == cortexa8 && optimize >= 3)
+ {
+@@ -1522,9 +1492,6 @@ arm_override_options (void)
+ align_jumps = 16;
+ }
+
+- tune_flags = all_cores[(int)arm_tune].flags;
+- current_tune = all_cores[(int)arm_tune].tune;
+-
+ if (target_fp16_format_name)
+ {
+ for (i = 0; i < ARRAY_SIZE (all_fp16_formats); i++)
+@@ -1907,7 +1874,7 @@ arm_override_options (void)
+ /* Enable -mfix-cortex-m3-ldrd by default for Cortex-M3 cores. */
+ if (fix_cm3_ldrd == 2)
+ {
+- if (selected_cpu == cortexm3)
++ if (arm_selected_cpu->core == cortexm3)
+ fix_cm3_ldrd = 1;
+ else
+ fix_cm3_ldrd = 0;
+@@ -21235,13 +21202,10 @@ arm_file_start (void)
+ if (TARGET_BPABI)
+ {
+ const char *fpu_name;
+- if (arm_select[0].string)
+- asm_fprintf (asm_out_file, "\t.cpu %s\n", arm_select[0].string);
+- else if (arm_select[1].string)
+- asm_fprintf (asm_out_file, "\t.arch %s\n", arm_select[1].string);
++ if (arm_selected_arch)
++ asm_fprintf (asm_out_file, "\t.arch %s\n", arm_selected_arch->name);
+ else
+- asm_fprintf (asm_out_file, "\t.cpu %s\n",
+- all_cores[arm_default_cpu].name);
++ asm_fprintf (asm_out_file, "\t.cpu %s\n", arm_selected_cpu->name);
+
+ if (TARGET_SOFT_FLOAT)
+ {
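The arm_find_cpu helper introduced above replaces the arm_select[] table walk with one name lookup shared by -mcpu, -march and -mtune. A standalone C sketch of the same lookup pattern (table contents and names hypothetical):

#include <stdio.h>
#include <string.h>

struct processors { const char *name; int core; };

static const struct processors all_cores_sketch[] = {
  { "arm6",      1 },
  { "cortex-m4", 2 },
  { NULL,        0 }   /* sentinel, as in all_cores */
};

/* Mirrors arm_find_cpu: NULL for an empty name, a diagnostic (here a
   plain message) for an unknown one.  */
static const struct processors *
find_cpu (const char *name, const struct processors *sel, const char *desc)
{
  if (!(name && *name))
    return NULL;
  for (; sel->name != NULL; sel++)
    if (strcmp (name, sel->name) == 0)
      return sel;
  fprintf (stderr, "bad value (%s) for %s switch\n", name, desc);
  return NULL;
}

int
main (void)
{
  const struct processors *p = find_cpu ("cortex-m4", all_cores_sketch, "-mcpu");
  if (p != NULL)
    printf ("%s -> core %d\n", p->name, p->core);
  return 0;
}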
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99413.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99413.patch
new file mode 100644
index 0000000000..3f873e7fe6
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99413.patch
@@ -0,0 +1,26 @@
+2010-10-13 Chung-Lin Tang <cltang@codesourcery.com>
+
+ Backport from mainline:
+
+ 2010-04-20 James E. Wilson <wilson@codesourcery.com>
+
+ gcc/
+ PR rtl-optimization/43520
+ * ira-lives.c (ira_implicitly_set_insn_hard_regs): Exclude classes with
+ zero available registers.
+
+=== modified file 'gcc/ira-lives.c'
+Index: gcc-4.5/gcc/ira-lives.c
+===================================================================
+--- gcc-4.5.orig/gcc/ira-lives.c
++++ gcc-4.5/gcc/ira-lives.c
+@@ -805,6 +805,9 @@ ira_implicitly_set_insn_hard_regs (HARD_
+ ? GENERAL_REGS
+ : REG_CLASS_FROM_CONSTRAINT (c, p));
+ if (cl != NO_REGS
++ /* There is no register pressure problem if all of the
++ regs in this class are fixed. */
++ && ira_available_class_regs[cl] != 0
+ && (ira_available_class_regs[cl]
+ <= ira_reg_class_nregs[cl][mode]))
+ IOR_HARD_REG_SET (*set, reg_class_contents[cl]);
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99414.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99414.patch
new file mode 100644
index 0000000000..648ea5fa5e
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99414.patch
@@ -0,0 +1,36 @@
+2010-10-15 Yao Qi <yao@codesourcery.com>
+
+ Backport from mainline:
+
+ 2010-10-14 Yao Qi <yao@codesourcery.com>
+
+ gcc/
+ PR target/45447
+ * config/arm/arm.c (arm_build_builtin_va_list): Assign
+ va_list_name to TYPE_STUB_DECL (va_list_type).
+
+ gcc/testsuite/
+ PR target/45447
+ * gcc.target/arm/pr45447.c: New test.
+
+=== modified file 'gcc/config/arm/arm.c'
+Index: gcc-4.5/gcc/config/arm/arm.c
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm.c
++++ gcc-4.5/gcc/config/arm/arm.c
+@@ -1166,6 +1166,7 @@ arm_build_builtin_va_list (void)
+ va_list_type);
+ DECL_ARTIFICIAL (va_list_name) = 1;
+ TYPE_NAME (va_list_type) = va_list_name;
++ TYPE_STUB_DECL (va_list_type) = va_list_name;
+ /* Create the __ap field. */
+ ap_field = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL,
+Index: gcc-4.5/gcc/testsuite/gcc.target/arm/pr45447.c
+===================================================================
+--- /dev/null
++++ gcc-4.5/gcc/testsuite/gcc.target/arm/pr45447.c
+@@ -0,0 +1,3 @@
++/* { dg-do compile } */
++/* { dg-options "-g -femit-struct-debug-baseonly" } */
++typedef __builtin_va_list x;
diff --git a/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99415.patch b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99415.patch
new file mode 100644
index 0000000000..3622ac4238
--- /dev/null
+++ b/recipes/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99415.patch
@@ -0,0 +1,46 @@
+2010-10-13 Chung-Lin Tang <cltang@codesourcery.com>
+
+ Issue #8615
+
+ Backport from mainline:
+
+ 2010-10-12 Chung-Lin Tang <cltang@codesourcery.com>
+
+ gcc/
+ * config/arm/arm.h (ARM_EXPAND_ALIGNMENT): Rename from
+ DATA_ALIGNMENT and add COND parameter. Update comments above.
+ (DATA_ALIGNMENT): Use ARM_EXPAND_ALIGNMENT, with !optimize_size.
+ (LOCAL_ALIGNMENT): Use ARM_EXPAND_ALIGNMENT, with
+ !flag_conserve_stack.
+
+=== modified file 'gcc/config/arm/arm.h'
+Index: gcc-4.5/gcc/config/arm/arm.h
+===================================================================
+--- gcc-4.5.orig/gcc/config/arm/arm.h
++++ gcc-4.5/gcc/config/arm/arm.h
+@@ -596,15 +596,21 @@ extern int low_irq_latency;
+ /* Align definitions of arrays, unions and structures so that
+ initializations and copies can be made more efficient. This is not
+ ABI-changing, so it only affects places where we can see the
+- definition. */
+-#define DATA_ALIGNMENT(EXP, ALIGN) \
+- ((((ALIGN) < BITS_PER_WORD) \
++ definition. Increasing the alignment tends to introduce padding,
++ so don't do this when optimizing for size/conserving stack space. */
++#define ARM_EXPAND_ALIGNMENT(COND, EXP, ALIGN) \
++ (((COND) && ((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (EXP) == ARRAY_TYPE \
+ || TREE_CODE (EXP) == UNION_TYPE \
+ || TREE_CODE (EXP) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+
++/* Align global data. */
++#define DATA_ALIGNMENT(EXP, ALIGN) \
++ ARM_EXPAND_ALIGNMENT(!optimize_size, EXP, ALIGN)
++
+ /* Similarly, make sure that objects on the stack are sensibly aligned. */
+-#define LOCAL_ALIGNMENT(EXP, ALIGN) DATA_ALIGNMENT(EXP, ALIGN)
++#define LOCAL_ALIGNMENT(EXP, ALIGN) \
++ ARM_EXPAND_ALIGNMENT(!flag_conserve_stack, EXP, ALIGN)
+
+ /* Setting STRUCTURE_SIZE_BOUNDARY to 32 produces more efficient code, but the
+ value set in previous versions of this toolchain was 8, which produces more