Issue #1259

Backport from mainline:

	gcc/
	2010-10-22  Jie Zhang

	* expr.c (emit_group_load_1): Update calls to extract_bit_field.
	(copy_blkmode_from_reg): Likewise.
	(read_complex_part): Likewise.
	(expand_expr_real_1): Calculate packedp and pass it to
	extract_bit_field.
	* expr.h (extract_bit_field): Update declaration.
	* calls.c (store_unaligned_arguments_into_pseudos): Update call
	to extract_bit_field.
	* expmed.c (extract_fixed_bit_field): Update calls to
	extract_fixed_bit_field.
	(store_split_bit_field): Likewise.
	(extract_bit_field_1): Add new argument packedp.
	(extract_bit_field): Add new argument packedp.
	(extract_fixed_bit_field): Add new argument packedp and let
	packed attribute override volatile.
	* stmt.c (expand_return): Update call to extract_bit_field.

	2010-10-15  Jie Zhang

	* doc/invoke.texi: Add -fstrict-volatile-bitfields to Option
	Summary and Index.

	2010-07-13  DJ Delorie

	* config/h8300/h8300.c (h8300_init_once): Default to
	-fstrict-volatile-bitfields.
	* config/sh/sh.c (sh_override_options): Default to
	-fstrict-volatile-bitfields.
	* config/rx/rx.c (rx_option_override): New.
	* config/m32c/m32c.c (m32c_override_options): Default to
	-fstrict-volatile-bitfields.

	2010-06-16  DJ Delorie

	* common.opt (-fstrict-volatile-bitfields): new.
	* doc/invoke.texi: Document it.
	* fold-const.c (optimize_bit_field_compare): For volatile
	bitfields, use the field's type to determine the mode, not the
	field's size.
	* expr.c (expand_assignment): Likewise.
	(get_inner_reference): Likewise.
	(expand_expr_real_1): Likewise.
	* expmed.c (store_fixed_bit_field): Likewise.
	(extract_bit_field_1): Likewise.
	(extract_fixed_bit_field): Likewise.

	gcc/testsuite/
	2010-08-19  Uros Bizjak

	PR testsuite/45324
	* gcc.target/i386/volatile-bitfields-1.c: Also scan movb.

	2010-06-16  DJ Delorie

	* gcc.target/i386/volatile-bitfields-1.c: New.
	* gcc.target/i386/volatile-bitfields-2.c: New.

=== modified file 'gcc/calls.c'
--- old/gcc/calls.c	2010-10-04 00:50:43 +0000
+++ new/gcc/calls.c	2010-11-04 12:43:52 +0000
@@ -878,7 +878,7 @@
 	    int bitsize = MIN (bytes * BITS_PER_UNIT, BITS_PER_WORD);
 
 	    args[i].aligned_regs[j] = reg;
-	    word = extract_bit_field (word, bitsize, 0, 1, NULL_RTX,
+	    word = extract_bit_field (word, bitsize, 0, 1, false, NULL_RTX,
 				      word_mode, word_mode);
 
 	    /* There is no need to restrict this code to loading items
=== modified file 'gcc/common.opt'
--- old/gcc/common.opt	2010-07-29 14:59:35 +0000
+++ new/gcc/common.opt	2010-11-04 12:43:52 +0000
@@ -613,6 +613,10 @@
 Common Report Var(flag_loop_block) Optimization
 Enable Loop Blocking transformation
 
+fstrict-volatile-bitfields
+Common Report Var(flag_strict_volatile_bitfields) Init(-1)
+Force bitfield accesses to match their type width
+
 fguess-branch-probability
 Common Report Var(flag_guess_branch_prob) Optimization
 Enable guessing of branch probabilities
=== modified file 'gcc/config/h8300/h8300.c'
--- old/gcc/config/h8300/h8300.c	2010-04-02 18:54:46 +0000
+++ new/gcc/config/h8300/h8300.c	2010-11-04 12:43:52 +0000
@@ -403,6 +403,10 @@
 	 restore er6 though, so bump up the cost.  */
       h8300_move_ratio = 6;
     }
+
+  /* This target defaults to strict volatile bitfields.  */
+  if (flag_strict_volatile_bitfields < 0)
+    flag_strict_volatile_bitfields = 1;
 }
 
 /* Implement REG_CLASS_FROM_LETTER.
=== modified file 'gcc/config/m32c/m32c.c'
--- old/gcc/config/m32c/m32c.c	2009-10-22 18:46:26 +0000
+++ new/gcc/config/m32c/m32c.c	2010-11-04 12:43:52 +0000
@@ -428,6 +428,10 @@
 
   if (TARGET_A24)
     flag_ivopts = 0;
+
+  /* This target defaults to strict volatile bitfields.  */
+  if (flag_strict_volatile_bitfields < 0)
+    flag_strict_volatile_bitfields = 1;
 }
 
 /* Defining data structures for per-function information */
=== modified file 'gcc/config/rx/rx.c'
--- old/gcc/config/rx/rx.c	2010-07-27 14:39:53 +0000
+++ new/gcc/config/rx/rx.c	2010-11-04 12:43:52 +0000
@@ -2187,6 +2187,14 @@
     }
 }
 
+static void
+rx_option_override (void)
+{
+  /* This target defaults to strict volatile bitfields.  */
+  if (flag_strict_volatile_bitfields < 0)
+    flag_strict_volatile_bitfields = 1;
+}
+
 static bool
 rx_allocate_stack_slots_for_args (void)
@@ -2759,6 +2767,9 @@
 #undef  TARGET_CC_MODES_COMPATIBLE
 #define TARGET_CC_MODES_COMPATIBLE	rx_cc_modes_compatible
 
+#undef  TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE		rx_option_override
+
 struct gcc_target targetm = TARGET_INITIALIZER;
 
 /* #include "gt-rx.h" */
=== modified file 'gcc/config/sh/sh.c'
--- old/gcc/config/sh/sh.c	2010-05-05 21:12:17 +0000
+++ new/gcc/config/sh/sh.c	2010-11-04 12:43:52 +0000
@@ -950,6 +950,10 @@
 
   if (sh_fixed_range_str)
     sh_fix_range (sh_fixed_range_str);
+
+  /* This target defaults to strict volatile bitfields.  */
+  if (flag_strict_volatile_bitfields < 0)
+    flag_strict_volatile_bitfields = 1;
 }
 
 /* Print the operand address in x to the stream.  */
=== modified file 'gcc/doc/invoke.texi'
--- old/gcc/doc/invoke.texi	2010-10-04 00:50:43 +0000
+++ new/gcc/doc/invoke.texi	2010-11-04 12:43:52 +0000
@@ -922,7 +922,7 @@
 -fargument-noalias-global  -fargument-noalias-anything @gol
 -fleading-underscore  -ftls-model=@var{model} @gol
 -ftrapv  -fwrapv  -fbounds-check @gol
--fvisibility}
+-fvisibility -fstrict-volatile-bitfields}
 @end table
 
 @menu
@@ -17629,6 +17629,33 @@
 An overview of these techniques, their benefits and how to use them
 is at @w{@uref{http://gcc.gnu.org/wiki/Visibility}}.
 
+@item -fstrict-volatile-bitfields
+@opindex fstrict-volatile-bitfields
+This option should be used if accesses to volatile bitfields (or other
+structure fields, although the compiler usually honors those types
+anyway) should use a single access in a mode of the same size as the
+container's type, aligned to a natural alignment if possible.  For
+example, targets with memory-mapped peripheral registers might require
+all such accesses to be 16 bits wide; with this flag the user could
+declare all peripheral bitfields as ``unsigned short'' (assuming short
+is 16 bits on these targets) to force GCC to use 16 bit accesses
+instead of, perhaps, a more efficient 32 bit access.
+
+If this option is disabled, the compiler will use the most efficient
+instruction.  In the previous example, that might be a 32-bit load
+instruction, even though that will access bytes that do not contain
+any portion of the bitfield, or memory-mapped registers unrelated to
+the one being updated.
+
+If the target requires strict alignment, and honoring the container
+type would require violating this alignment, a warning is issued.
+However, the access happens as the user requested, under the
+assumption that the user knows something about the target hardware
+that GCC is unaware of.
+
+The default value of this option is determined by the application binary
+interface for the target processor.
+
 @end table
 
 @c man end
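(As a concrete illustration of the semantics documented in the
doc/invoke.texi hunk above: a minimal sketch, not part of the patch.
The register layout, field names, and address are hypothetical.)

    /* A memory-mapped device control register whose fields all share
       the 16-bit container type "unsigned short".  */
    typedef struct {
      volatile unsigned short enable : 1;
      volatile unsigned short mode   : 3;
      volatile unsigned short        : 12;  /* reserved */
    } ctrl_reg;

    #define CTRL (*(volatile ctrl_reg *) 0x4000)  /* hypothetical address */

    void set_mode (unsigned int m)
    {
      /* With -fstrict-volatile-bitfields, this read-modify-write is
         performed with single 16-bit accesses, matching the container
         type.  With -fno-strict-volatile-bitfields, the compiler is
         free to pick a wider (e.g. 32-bit) access that also touches
         neighbouring memory-mapped registers.  */
      CTRL.mode = m & 7;
    }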
=== modified file 'gcc/expmed.c'
--- old/gcc/expmed.c	2010-10-04 00:50:43 +0000
+++ new/gcc/expmed.c	2010-11-04 12:43:52 +0000
@@ -47,7 +47,7 @@
 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
 				    unsigned HOST_WIDE_INT,
 				    unsigned HOST_WIDE_INT,
-				    unsigned HOST_WIDE_INT, rtx, int);
+				    unsigned HOST_WIDE_INT, rtx, int, bool);
 static rtx mask_rtx (enum machine_mode, int, int, int);
 static rtx lshift_value (enum machine_mode, rtx, int, int);
 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
@@ -904,8 +904,14 @@
       if (GET_MODE_BITSIZE (mode) == 0
 	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
 	mode = word_mode;
-      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
-			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
+
+      if (MEM_VOLATILE_P (op0)
+	  && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
+	  && flag_strict_volatile_bitfields > 0)
+	mode = GET_MODE (op0);
+      else
+	mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+			      MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
 
       if (mode == VOIDmode)
 	{
@@ -1099,7 +1105,7 @@
 	     endianness compensation) to fetch the piece we want.  */
 	  part = extract_fixed_bit_field (word_mode, value, 0, thissize,
 					  total_bits - bitsize + bitsdone,
-					  NULL_RTX, 1);
+					  NULL_RTX, 1, false);
 	}
       else
 	{
@@ -1110,7 +1116,7 @@
 		     & (((HOST_WIDE_INT) 1 << thissize) - 1));
 	  else
 	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
-					    bitsdone, NULL_RTX, 1);
+					    bitsdone, NULL_RTX, 1, false);
 	}
 
       /* If OP0 is a register, then handle OFFSET here.
@@ -1176,7 +1182,8 @@
 
 static rtx
 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
-		     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
+		     unsigned HOST_WIDE_INT bitnum,
+		     int unsignedp, bool packedp, rtx target,
 		     enum machine_mode mode, enum machine_mode tmode,
 		     bool fallback_p)
 {
@@ -1378,6 +1385,14 @@
 	      ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
 	      : mode);
 
+  /* If the bitfield is volatile, we need to make sure the access
+     remains on a type-aligned boundary.  */
+  if (GET_CODE (op0) == MEM
+      && MEM_VOLATILE_P (op0)
+      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
+      && flag_strict_volatile_bitfields > 0)
+    goto no_subreg_mode_swap;
+
   if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
 	&& bitpos % BITS_PER_WORD == 0)
        || (mode1 != BLKmode
@@ -1450,7 +1465,7 @@
 	  rtx result_part
 	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
 					   bitsize - i * BITS_PER_WORD),
-				 bitnum + bit_offset, 1, target_part, mode,
+				 bitnum + bit_offset, 1, false, target_part, mode,
 				 word_mode);
 
 	  gcc_assert (target_part);
@@ -1649,7 +1664,7 @@
 	      xop0 = adjust_address (op0, bestmode, xoffset);
 	      xop0 = force_reg (bestmode, xop0);
 	      result = extract_bit_field_1 (xop0, bitsize, xbitpos,
-					    unsignedp, target,
+					    unsignedp, packedp, target,
 					    mode, tmode, false);
 	      if (result)
 		return result;
@@ -1663,7 +1678,7 @@
     return NULL;
 
   target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
-				    bitpos, target, unsignedp);
+				    bitpos, target, unsignedp, packedp);
   return convert_extracted_bit_field (target, mode, tmode, unsignedp);
 }
 
@@ -1674,6 +1689,7 @@
 
    STR_RTX is the structure containing the byte (a REG or MEM).
    UNSIGNEDP is nonzero if this is an unsigned bit field.
+   PACKEDP is nonzero if the field has the packed attribute.
    MODE is the natural mode of the field value once extracted.
    TMODE is the mode the caller would like the value to have;
    but the value may be returned with type MODE instead.
@@ -1685,10 +1701,10 @@
 
 rtx
 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
-		   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
-		   enum machine_mode mode, enum machine_mode tmode)
+		   unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
+		   rtx target, enum machine_mode mode, enum machine_mode tmode)
 {
-  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
+  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
 			      target, mode, tmode, true);
 }
 
@@ -1704,6 +1720,8 @@
    which is significant on bigendian machines.)
 
    UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
+
+   PACKEDP is true if the field has the packed attribute.
 
    If TARGET is nonzero, attempts to store the value there
    and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */
@@ -1713,7 +1731,7 @@
 			 unsigned HOST_WIDE_INT offset,
 			 unsigned HOST_WIDE_INT bitsize,
 			 unsigned HOST_WIDE_INT bitpos, rtx target,
-			 int unsignedp)
+			 int unsignedp, bool packedp)
 {
   unsigned int total_bits = BITS_PER_WORD;
   enum machine_mode mode;
@@ -1730,8 +1748,19 @@
 	 includes the entire field.  If such a mode would be larger than
 	 a word, we won't be doing the extraction the normal way.  */
 
-      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
-			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
+      if (MEM_VOLATILE_P (op0)
+	  && flag_strict_volatile_bitfields > 0)
+	{
+	  if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
+	    mode = GET_MODE (op0);
+	  else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
+	    mode = GET_MODE (target);
+	  else
+	    mode = tmode;
+	}
+      else
+	mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+			      MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
 
       if (mode == VOIDmode)
 	/* The only way this should occur is if the field spans word
@@ -1752,12 +1781,67 @@
 			   * BITS_PER_UNIT);
 	}
 
-      /* Get ref to an aligned byte, halfword, or word containing the field.
-	 Adjust BITPOS to be position within a word,
-	 and OFFSET to be the offset of that word.
-	 Then alter OP0 to refer to that word.  */
-      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
-      offset -= (offset % (total_bits / BITS_PER_UNIT));
+      /* If we're accessing a volatile MEM, we can't do the next
+	 alignment step if it results in a multi-word access where we
+	 otherwise wouldn't have one.  So, check for that case
+	 here.  */
+      if (MEM_P (op0)
+	  && MEM_VOLATILE_P (op0)
+	  && flag_strict_volatile_bitfields > 0
+	  && bitpos + bitsize <= total_bits
+	  && bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
+	{
+	  if (STRICT_ALIGNMENT)
+	    {
+	      static bool informed_about_misalignment = false;
+	      bool warned;
+
+	      if (packedp)
+		{
+		  if (bitsize == total_bits)
+		    warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
+					 "multiple accesses to volatile structure member"
+					 " because of packed attribute");
+		  else
+		    warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
					 "multiple accesses to volatile structure bitfield"
+					 " because of packed attribute");
+
+		  return extract_split_bit_field (op0, bitsize,
+						  bitpos + offset * BITS_PER_UNIT,
+						  unsignedp);
+		}
+
+	      if (bitsize == total_bits)
+		warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
+				     "mis-aligned access used for structure member");
+	      else
+		warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
+				     "mis-aligned access used for structure bitfield");
+
+	      if (! informed_about_misalignment && warned)
+		{
+		  informed_about_misalignment = true;
+		  inform (input_location,
+			  "When a volatile object spans multiple type-sized locations,"
+			  " the compiler must choose between using a single mis-aligned access to"
+			  " preserve the volatility, or using multiple aligned accesses to avoid"
+			  " runtime faults.  This code may fail at runtime if the hardware does"
+			  " not allow this access.");
+		}
+	    }
+	}
+      else
+	{
+
+	  /* Get ref to an aligned byte, halfword, or word containing the field.
+	     Adjust BITPOS to be position within a word,
+	     and OFFSET to be the offset of that word.
+	     Then alter OP0 to refer to that word.  */
+	  bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
+	  offset -= (offset % (total_bits / BITS_PER_UNIT));
+	}
+
       op0 = adjust_address (op0, mode, offset);
     }
@@ -1966,7 +2050,7 @@
 	     extract_fixed_bit_field wants offset in bytes.  */
 	  part = extract_fixed_bit_field (word_mode, word,
 					  offset * unit / BITS_PER_UNIT,
-					  thissize, thispos, 0, 1);
+					  thissize, thispos, 0, 1, false);
 	  bitsdone += thissize;
 
 	  /* Shift this part into place for the result.  */
=== modified file 'gcc/expr.c'
--- old/gcc/expr.c	2010-10-04 00:50:43 +0000
+++ new/gcc/expr.c	2010-11-04 12:43:52 +0000
@@ -1749,7 +1749,7 @@
 	      && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode))
 	    tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
 					 (bytepos % slen0) * BITS_PER_UNIT,
-					 1, NULL_RTX, mode, mode);
+					 1, false, NULL_RTX, mode, mode);
 	}
       else
 	{
@@ -1759,7 +1759,7 @@
 	  mem = assign_stack_temp (GET_MODE (src), slen, 0);
 	  emit_move_insn (mem, src);
 	  tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
-				       0, 1, NULL_RTX, mode, mode);
+				       0, 1, false, NULL_RTX, mode, mode);
 	}
 
       /* FIXME: A SIMD parallel will eventually lead to a subreg of a
@@ -1800,7 +1800,7 @@
 	tmps[i] = src;
       else
 	tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
-				     bytepos * BITS_PER_UNIT, 1, NULL_RTX,
+				     bytepos * BITS_PER_UNIT, 1, false, NULL_RTX,
 				     mode, mode);
 
       if (shift)
@@ -2213,7 +2213,7 @@
 	 bitpos for the destination store (left justified).  */
       store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, copy_mode,
 		       extract_bit_field (src, bitsize,
-					  xbitpos % BITS_PER_WORD, 1,
+					  xbitpos % BITS_PER_WORD, 1, false,
 					  NULL_RTX, copy_mode, copy_mode));
     }
 
@@ -2970,7 +2970,7 @@
     }
 
   return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
-			    true, NULL_RTX, imode, imode);
+			    true, false, NULL_RTX, imode, imode);
 }
 
 /* A subroutine of emit_move_insn_1.  Yet another lowpart generator.
@@ -4233,6 +4233,13 @@
 
       to_rtx = expand_normal (tem);
 
+      /* If the bitfield is volatile, we want to access it in the
+	 field's mode, not the computed mode.  */
+      if (volatilep
+	  && GET_CODE (to_rtx) == MEM
+	  && flag_strict_volatile_bitfields > 0)
+	to_rtx = adjust_address (to_rtx, mode1, 0);
+
       if (offset != 0)
 	{
 	  enum machine_mode address_mode;
@@ -5993,6 +6000,12 @@
 	mode = DECL_MODE (field);
       else if (DECL_MODE (field) == BLKmode)
 	blkmode_bitfield = true;
+      else if (TREE_THIS_VOLATILE (exp)
+	       && flag_strict_volatile_bitfields > 0)
+	/* Volatile bitfields should be accessed in the mode of the
+	   field's type, not the mode computed based on the bit
+	   size.  */
+	mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
 
       *punsignedp = DECL_UNSIGNED (field);
     }
@@ -8848,6 +8861,7 @@
       HOST_WIDE_INT bitsize, bitpos;
       tree offset;
       int volatilep = 0, must_force_mem;
+      bool packedp = false;
       tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
 				      &mode1, &unsignedp, &volatilep, true);
       rtx orig_op0, memloc;
@@ -8857,6 +8871,11 @@
 	 infinitely recurse.  */
       gcc_assert (tem != exp);
 
+      if (TYPE_PACKED (TREE_TYPE (TREE_OPERAND (exp, 0)))
+	  || (TREE_CODE (TREE_OPERAND (exp, 1)) == FIELD_DECL
+	      && DECL_PACKED (TREE_OPERAND (exp, 1))))
+	packedp = true;
+
       /* If TEM's type is a union of variable size, pass TARGET to the inner
 	 computation, since it will need a temporary and TARGET is known
 	 to have to do.  This occurs in unchecked conversion in Ada.  */
@@ -8873,6 +8892,14 @@
 		 || modifier == EXPAND_STACK_PARM)
 		? modifier : EXPAND_NORMAL);
 
+
+      /* If the bitfield is volatile, we want to access it in the
+	 field's mode, not the computed mode.  */
+      if (volatilep
+	  && GET_CODE (op0) == MEM
+	  && flag_strict_volatile_bitfields > 0)
+	op0 = adjust_address (op0, mode1, 0);
+
       mode2
 	= CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
@@ -8998,6 +9025,9 @@
 	      && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
 	      && modifier != EXPAND_CONST_ADDRESS
 	      && modifier != EXPAND_INITIALIZER)
+	  /* If the field is volatile, we always want an aligned
+	     access.  */
+	  || (volatilep && flag_strict_volatile_bitfields > 0)
 	  /* If the field isn't aligned enough to fetch as a memref,
 	     fetch it as a bit field.  */
 	  || (mode1 != BLKmode
@@ -9058,7 +9088,7 @@
 	  if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
 	    mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
 
-	  op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
+	  op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp, packedp,
 				   (modifier == EXPAND_STACK_PARM
 				    ? NULL_RTX : target),
 				   ext_mode, ext_mode);
=== modified file 'gcc/expr.h'
--- old/gcc/expr.h	2010-02-19 09:53:51 +0000
+++ new/gcc/expr.h	2010-11-04 12:43:52 +0000
@@ -802,7 +802,7 @@
 extern void store_bit_field (rtx, unsigned HOST_WIDE_INT,
 			     unsigned HOST_WIDE_INT, enum machine_mode, rtx);
 extern rtx extract_bit_field (rtx, unsigned HOST_WIDE_INT,
-			      unsigned HOST_WIDE_INT, int, rtx,
+			      unsigned HOST_WIDE_INT, int, bool, rtx,
 			      enum machine_mode, enum machine_mode);
 extern rtx extract_low_bits (enum machine_mode, enum machine_mode, rtx);
 extern rtx expand_mult (enum machine_mode, rtx, rtx, rtx, int);
=== modified file 'gcc/fold-const.c'
--- old/gcc/fold-const.c	2010-10-04 00:50:43 +0000
+++ new/gcc/fold-const.c	2010-11-04 12:43:52 +0000
@@ -4208,11 +4208,16 @@
 
   /* See if we can find a mode to refer to this field.  We should be able to,
      but fail if we can't.  */
-  nmode = get_best_mode (lbitsize, lbitpos,
-			 const_p ? TYPE_ALIGN (TREE_TYPE (linner))
-			 : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
-				TYPE_ALIGN (TREE_TYPE (rinner))),
-			 word_mode, lvolatilep || rvolatilep);
+  if (lvolatilep
+      && GET_MODE_BITSIZE (lmode) > 0
+      && flag_strict_volatile_bitfields > 0)
+    nmode = lmode;
+  else
+    nmode = get_best_mode (lbitsize, lbitpos,
+			   const_p ? TYPE_ALIGN (TREE_TYPE (linner))
+			   : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
+				  TYPE_ALIGN (TREE_TYPE (rinner))),
+			   word_mode, lvolatilep || rvolatilep);
   if (nmode == VOIDmode)
     return 0;
=== modified file 'gcc/stmt.c'
--- old/gcc/stmt.c	2010-08-13 11:53:46 +0000
+++ new/gcc/stmt.c	2010-11-04 12:43:52 +0000
@@ -1751,7 +1751,7 @@
 	 xbitpos for the destination store (right justified).  */
       store_bit_field (dst, bitsize, xbitpos % BITS_PER_WORD, word_mode,
 		       extract_bit_field (src, bitsize,
-					  bitpos % BITS_PER_WORD, 1,
+					  bitpos % BITS_PER_WORD, 1, false,
 					  NULL_RTX, word_mode, word_mode));
     }
 
=== added file 'gcc/testsuite/gcc.target/i386/volatile-bitfields-1.c'
--- old/gcc/testsuite/gcc.target/i386/volatile-bitfields-1.c	1970-01-01 00:00:00 +0000
+++ new/gcc/testsuite/gcc.target/i386/volatile-bitfields-1.c	2010-11-04 12:43:52 +0000
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fstrict-volatile-bitfields" } */
+
+typedef struct {
+  char a:1;
+  char b:7;
+  int c;
+} BitStruct;
+
+volatile BitStruct bits;
+
+int foo ()
+{
+  return bits.b;
+}
+
+/* { dg-final { scan-assembler "mov(b|zbl).*bits" } } */
=== added file 'gcc/testsuite/gcc.target/i386/volatile-bitfields-2.c'
--- old/gcc/testsuite/gcc.target/i386/volatile-bitfields-2.c	1970-01-01 00:00:00 +0000
+++ new/gcc/testsuite/gcc.target/i386/volatile-bitfields-2.c	2010-11-04 12:43:52 +0000
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-strict-volatile-bitfields" } */
+
+typedef struct {
+  char a:1;
+  char b:7;
+  int c;
+} BitStruct;
+
+volatile BitStruct bits;
+
+int foo ()
+{
+  return bits.b;
+}
+
+/* { dg-final { scan-assembler "movl.*bits" } } */
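(For reference, a minimal sketch of the case the new warning code in
extract_fixed_bit_field targets. The layout is hypothetical, and
whether the diagnostic actually fires depends on the target being
strict-alignment, so treat this as an illustration of intent rather
than a test case.)

    /* The packed attribute misaligns the volatile int-typed bitfield,
       so a single access in the container's mode would straddle an
       alignment boundary.  On a strict-alignment target with
       -fstrict-volatile-bitfields, reading regs.status is expected to
       warn "multiple accesses to volatile structure bitfield because
       of packed attribute" and fall back to a split access.  */
    struct __attribute__ ((packed)) dev_regs {
      char tag;
      volatile unsigned int status : 24;
    };

    extern struct dev_regs regs;

    unsigned int read_status (void)
    {
      return regs.status;
    }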