diff -urpN a/gcc/cfgexpand.c b/gcc/cfgexpand.c
--- a/gcc/cfgexpand.c 2019-05-30 16:58:45.350508770 +0800
+++ b/gcc/cfgexpand.c 2019-05-30 11:53:13.315156625 +0800
@@ -6094,6 +6094,23 @@ stack_protect_prologue (void)
rtx x, y;
x = expand_normal (crtl->stack_protect_guard);
+
+ if (targetm.have_stack_protect_combined_set () && guard_decl)
+ {
+ gcc_assert (DECL_P (guard_decl));
+ y = DECL_RTL (guard_decl);
+
+ /* Allow the target to compute the address of Y and copy it to X without
+ leaking Y into a register. This combined address + copy pattern
+ allows the target to prevent spilling of any intermediate results by
+ splitting it after register allocation. */
+ if (rtx_insn *insn = targetm.gen_stack_protect_combined_set (x, y))
+ {
+ emit_insn (insn);
+ return;
+ }
+ }
+
if (guard_decl)
y = expand_normal (guard_decl);
else
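For orientation before the target-specific changes below: the hook pair queried above corresponds, conceptually, to the following C model of the prologue-side canary store. This is an illustrative sketch only, not part of the patch; __stack_chk_guard is the conventional guard symbol used by glibc/libssp.

  /* Sketch: what stack_protect_combined_set must achieve.  The whole
     take-address-of-guard + load + store sequence is emitted as a single
     insn and only split after register allocation, so no intermediate
     copy of the guard value or of its address can be spilled.  */
  extern unsigned long __stack_chk_guard;

  static void
  prologue_canary_model (unsigned long *canary_slot)
  {
    *canary_slot = __stack_chk_guard;  /* address + load + store, no spill */
  }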
diff -urpN a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
--- a/gcc/config/arm/arm.c 2019-05-30 16:58:45.354508770 +0800
+++ b/gcc/config/arm/arm.c 2019-05-30 16:59:05.058508073 +0800
@@ -7236,21 +7236,34 @@ legitimate_pic_operand_p (rtx x)
return 1;
}
-/* Record that the current function needs a PIC register. Initialize
- cfun->machine->pic_reg if we have not already done so. */
+/* Record that the current function needs a PIC register. If PIC_REG is null,
+ a new pseudo is allocated as the PIC register; otherwise PIC_REG is used.
+ In both cases cfun->machine->pic_reg is initialized if we have not already
+ done so. COMPUTE_NOW decides whether and where to set the PIC register. If
+ true, the PIC register is reloaded at the current position in the
+ instruction stream, regardless of whether it was loaded before. Otherwise,
+ it is only loaded if not already done (crtl->uses_pic_offset_table is
+ null). Note that a nonnull PIC_REG is supported only if COMPUTE_NOW is
+ true, and a null PIC_REG only if COMPUTE_NOW is false. */
static void
-require_pic_register (void)
+require_pic_register (rtx pic_reg, bool compute_now)
{
+ gcc_assert (compute_now == (pic_reg != NULL_RTX));
+
/* A lot of the logic here is made obscure by the fact that this
routine gets called as part of the rtx cost estimation process.
We don't want those calls to affect any assumptions about the real
function; and further, we can't call entry_of_function() until we
start the real expansion process. */
- if (!crtl->uses_pic_offset_table)
+ if (!crtl->uses_pic_offset_table || compute_now)
{
- gcc_assert (can_create_pseudo_p ());
+ gcc_assert (can_create_pseudo_p ()
+ || (pic_reg != NULL_RTX
+ && REG_P (pic_reg)
+ && GET_MODE (pic_reg) == Pmode));
if (arm_pic_register != INVALID_REGNUM
+ && !compute_now
&& !(TARGET_THUMB1 && arm_pic_register > LAST_LO_REGNUM))
{
if (!cfun->machine->pic_reg)
@@ -7266,8 +7279,19 @@ require_pic_register (void)
{
rtx_insn *seq, *insn;
- if (!cfun->machine->pic_reg)
- cfun->machine->pic_reg = gen_reg_rtx (Pmode);
+ if (pic_reg == NULL_RTX && cfun->machine->pic_reg == NULL_RTX)
+ {
+ pic_reg = gen_reg_rtx (Pmode);
+ cfun->machine->pic_reg = pic_reg;
+ }
+ else if (pic_reg == NULL_RTX)
+ {
+ pic_reg = cfun->machine->pic_reg;
+ }
+ else if (cfun->machine->pic_reg == NULL_RTX)
+ {
+ cfun->machine->pic_reg = pic_reg;
+ }
/* Play games to avoid marking the function as needing pic
if we are being called as part of the cost-estimation
@@ -7278,11 +7306,12 @@ require_pic_register (void)
start_sequence ();
if (TARGET_THUMB1 && arm_pic_register != INVALID_REGNUM
- && arm_pic_register > LAST_LO_REGNUM)
+ && arm_pic_register > LAST_LO_REGNUM
+ && !compute_now)
emit_move_insn (cfun->machine->pic_reg,
gen_rtx_REG (Pmode, arm_pic_register));
else
- arm_load_pic_register (0UL);
+ arm_load_pic_register (0UL, pic_reg);
seq = get_insns ();
end_sequence ();
@@ -7295,16 +7324,33 @@ require_pic_register (void)
we can't yet emit instructions directly in the final
insn stream. Queue the insns on the entry edge, they will
be committed after everything else is expanded. */
- insert_insn_on_edge (seq,
- single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+ if (currently_expanding_to_rtl)
+ insert_insn_on_edge (seq,
+ single_succ_edge
+ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+ else
+ emit_insn (seq);
}
}
}
}
+/* Legitimize the PIC load of ORIG into REG. If REG is NULL, a new pseudo is
+ created to hold the result of the load. If not NULL, PIC_REG indicates
+ which register to use as the PIC register; otherwise it is chosen by the
+ register allocator. COMPUTE_NOW forces the PIC register to be loaded at
+ the current location in the instruction stream, regardless of whether it
+ was loaded previously. Note that a nonnull PIC_REG is supported only if
+ COMPUTE_NOW is true, and a null PIC_REG only if COMPUTE_NOW is false.
+
+ Returns the register REG into which the PIC load is performed. */
+
rtx
-legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
+legitimize_pic_address (rtx orig, machine_mode mode, rtx reg, rtx pic_reg,
+ bool compute_now)
{
+ gcc_assert (compute_now == (pic_reg != NULL_RTX));
+
if (GET_CODE (orig) == SYMBOL_REF
|| GET_CODE (orig) == LABEL_REF)
{
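The assert above encodes the calling convention: exactly two call shapes are valid, and both appear later in this patch. Sketched here for reference (orig, reg and pic_scratch are placeholder names, not identifiers from the patch):

  /* Lazy form: the PIC register is chosen by the register allocator and
     loaded on the entry edge only if not already done (this is what all
     pre-existing callers use).  */
  legitimize_pic_address (orig, SImode, reg, NULL_RTX, false);

  /* Eager form: the PIC base is recomputed at the current position into
     the given scratch register (used by the stack-protector splitters in
     arm.md below).  */
  legitimize_pic_address (orig, SImode, reg, pic_scratch, true);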
@@ -7337,9 +7383,12 @@ legitimize_pic_address (rtx orig, machin
rtx mem;
/* If this function doesn't have a pic register, create one now. */
- require_pic_register ();
+ require_pic_register (pic_reg, compute_now);
+
+ if (pic_reg == NULL_RTX)
+ pic_reg = cfun->machine->pic_reg;
- pat = gen_calculate_pic_address (reg, cfun->machine->pic_reg, orig);
+ pat = gen_calculate_pic_address (reg, pic_reg, orig);
/* Make the MEM as close to a constant as possible. */
mem = SET_SRC (pat);
@@ -7388,9 +7437,11 @@ legitimize_pic_address (rtx orig, machin
gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
- base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg,
+ pic_reg, compute_now);
offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
- base == reg ? 0 : reg);
+ base == reg ? 0 : reg, pic_reg,
+ compute_now);
if (CONST_INT_P (offset))
{
@@ -7490,16 +7541,17 @@ static GTY(()) int pic_labelno;
low register. */
void
-arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
+arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED, rtx pic_reg)
{
- rtx l1, labelno, pic_tmp, pic_rtx, pic_reg;
+ rtx l1, labelno, pic_tmp, pic_rtx;
if (crtl->uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
return;
gcc_assert (flag_pic);
- pic_reg = cfun->machine->pic_reg;
+ if (pic_reg == NULL_RTX)
+ pic_reg = cfun->machine->pic_reg;
if (TARGET_VXWORKS_RTP)
{
pic_rtx = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE);
@@ -8558,7 +8610,8 @@ arm_legitimize_address (rtx x, rtx orig_
{
/* We need to find and carefully transform any SYMBOL and LABEL
references; so go back to the original address expression. */
- rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
+ rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX, NULL_RTX,
+ false /*compute_now*/);
if (new_x != orig_x)
x = new_x;
@@ -8626,7 +8679,8 @@ thumb_legitimize_address (rtx x, rtx ori
{
/* We need to find and carefully transform any SYMBOL and LABEL
references; so go back to the original address expression. */
- rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
+ rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX, NULL_RTX,
+ false /*compute_now*/);
if (new_x != orig_x)
x = new_x;
@@ -17800,7 +17854,7 @@ arm_emit_call_insn (rtx pat, rtx addr, b
? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
: !SYMBOL_REF_LOCAL_P (addr)))
{
- require_pic_register ();
+ require_pic_register (NULL_RTX, false /*compute_now*/);
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), cfun->machine->pic_reg);
}
@@ -21706,7 +21760,7 @@ arm_expand_prologue (void)
mask &= THUMB2_WORK_REGS;
if (!IS_NESTED (func_type))
mask |= (1 << IP_REGNUM);
- arm_load_pic_register (mask);
+ arm_load_pic_register (mask, NULL_RTX);
}
/* If we are profiling, make sure no instructions are scheduled before
@@ -24909,7 +24963,7 @@ thumb1_expand_prologue (void)
/* Load the pic register before setting the frame pointer,
so we can use r7 as a temporary work register. */
if (flag_pic && arm_pic_register != INVALID_REGNUM)
- arm_load_pic_register (live_regs_mask);
+ arm_load_pic_register (live_regs_mask, NULL_RTX);
if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
diff -urpN a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
--- a/gcc/config/arm/arm.md 2019-05-30 16:58:45.358508769 +0800
+++ b/gcc/config/arm/arm.md 2019-05-30 11:52:58.491157149 +0800
@@ -6051,7 +6051,8 @@
operands[1] = legitimize_pic_address (operands[1], SImode,
(!can_create_pseudo_p ()
? operands[0]
- : 0));
+ : NULL_RTX), NULL_RTX,
+ false /*compute_now*/);
}
"
)
@@ -6340,7 +6341,7 @@
/* r3 is clobbered by set/longjmp, so we can use it as a scratch
register. */
if (arm_pic_register != INVALID_REGNUM)
- arm_load_pic_register (1UL << 3);
+ arm_load_pic_register (1UL << 3, NULL_RTX);
DONE;
}")
@@ -8666,6 +8667,164 @@
(set_attr "conds" "clob")]
)
+;; Named patterns for stack smashing protection.
+(define_expand "stack_protect_combined_set"
+ [(parallel
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (unspec:SI [(match_operand:SI 1 "guard_operand" "")]
+ UNSPEC_SP_SET))
+ (clobber (match_scratch:SI 2 ""))
+ (clobber (match_scratch:SI 3 ""))])]
+ ""
+ ""
+)
+
+;; Use a separate insn from the above expand so that the mem can stay outside
+;; operand #1 when register allocation happens. This is needed to keep LRA
+;; from trying to reload the guard, since we need to control how PIC access
+;; is done in the -fpic/-fPIC case (see the COMPUTE_NOW parameter when
+;; calling legitimize_pic_address ()).
+(define_insn_and_split "*stack_protect_combined_set_insn"
+ [(set (match_operand:SI 0 "memory_operand" "=m,m")
+ (unspec:SI [(mem:SI (match_operand:SI 1 "guard_addr_operand" "X,X"))]
+ UNSPEC_SP_SET))
+ (clobber (match_scratch:SI 2 "=&l,&r"))
+ (clobber (match_scratch:SI 3 "=&l,&r"))]
+ ""
+ "#"
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (unspec:SI [(mem:SI (match_dup 2))]
+ UNSPEC_SP_SET))
+ (clobber (match_dup 2))])]
+ "
+{
+ if (flag_pic)
+ {
+ /* Force recomputation of the GOT base now. */
+ legitimize_pic_address (operands[1], SImode, operands[2], operands[3],
+ true /*compute_now*/);
+ }
+ else
+ {
+ if (address_operand (operands[1], SImode))
+ operands[2] = operands[1];
+ else
+ {
+ rtx mem = XEXP (force_const_mem (SImode, operands[1]), 0);
+ emit_move_insn (operands[2], mem);
+ }
+ }
+}"
+ [(set_attr "arch" "t1,32")]
+)
+
+(define_insn "*stack_protect_set_insn"
+ [(set (match_operand:SI 0 "memory_operand" "=m,m")
+ (unspec:SI [(mem:SI (match_operand:SI 1 "register_operand" "+&l,&r"))]
+ UNSPEC_SP_SET))
+ (clobber (match_dup 1))]
+ ""
+ "@
+ ldr\\t%1, [%1]\;str\\t%1, %0\;movs\t%1,#0
+ ldr\\t%1, [%1]\;str\\t%1, %0\;mov\t%1,#0"
+ [(set_attr "length" "8,12")
+ (set_attr "conds" "clob,nocond")
+ (set_attr "type" "multiple")
+ (set_attr "arch" "t1,32")]
+)
+
+(define_expand "stack_protect_combined_test"
+ [(parallel
+ [(set (pc)
+ (if_then_else
+ (eq (match_operand:SI 0 "memory_operand" "")
+ (unspec:SI [(match_operand:SI 1 "guard_operand" "")]
+ UNSPEC_SP_TEST))
+ (label_ref (match_operand 2))
+ (pc)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+ ""
+)
+
+;; Use a separate insn from the above expand so that the mem can stay outside
+;; operand #1 when register allocation happens. This is needed to keep LRA
+;; from trying to reload the guard, since we need to control how PIC access
+;; is done in the -fpic/-fPIC case (see the COMPUTE_NOW parameter when
+;; calling legitimize_pic_address ()).
+(define_insn_and_split "*stack_protect_combined_test_insn"
+ [(set (pc)
+ (if_then_else
+ (eq (match_operand:SI 0 "memory_operand" "m,m")
+ (unspec:SI [(mem:SI (match_operand:SI 1 "guard_addr_operand" "X,X"))]
+ UNSPEC_SP_TEST))
+ (label_ref (match_operand 2))
+ (pc)))
+ (clobber (match_scratch:SI 3 "=&l,&r"))
+ (clobber (match_scratch:SI 4 "=&l,&r"))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ rtx eq;
+
+ if (flag_pic)
+ {
+ /* Force recomputation of the GOT base now. */
+ legitimize_pic_address (operands[1], SImode, operands[3], operands[4],
+ true /*compute_now*/);
+ }
+ else
+ {
+ if (address_operand (operands[1], SImode))
+ operands[3] = operands[1];
+ else
+ {
+ rtx mem = XEXP (force_const_mem (SImode, operands[1]), 0);
+ emit_move_insn (operands[3], mem);
+ }
+ }
+ if (TARGET_32BIT)
+ {
+ emit_insn (gen_arm_stack_protect_test_insn (operands[4], operands[0],
+ operands[3]));
+ rtx cc_reg = gen_rtx_REG (CC_Zmode, CC_REGNUM);
+ eq = gen_rtx_EQ (CC_Zmode, cc_reg, const0_rtx);
+ emit_jump_insn (gen_arm_cond_branch (operands[2], eq, cc_reg));
+ }
+ else
+ {
+ emit_insn (gen_thumb1_stack_protect_test_insn (operands[4], operands[0],
+ operands[3]));
+ eq = gen_rtx_EQ (VOIDmode, operands[4], const0_rtx);
+ emit_jump_insn (gen_cbranchsi4 (eq, operands[4], const0_rtx,
+ operands[2]));
+ }
+ DONE;
+}
+ [(set_attr "arch" "t1,32")]
+)
+
+(define_insn "arm_stack_protect_test_insn"
+ [(set (reg:CC_Z CC_REGNUM)
+ (compare:CC_Z (unspec:SI [(match_operand:SI 1 "memory_operand" "m,m")
+ (mem:SI (match_operand:SI 2 "register_operand" "+l,r"))]
+ UNSPEC_SP_TEST)
+ (const_int 0)))
+ (clobber (match_operand:SI 0 "register_operand" "=&l,&r"))
+ (clobber (match_dup 2))]
+ "TARGET_32BIT"
+ "ldr\t%0, [%2]\;ldr\t%2, %1\;eors\t%0, %2, %0"
+ [(set_attr "length" "8,12")
+ (set_attr "conds" "set")
+ (set_attr "type" "multiple")
+ (set_attr "arch" "t,32")]
+)
+
(define_expand "casesi"
[(match_operand:SI 0 "s_register_operand" "") ; index to jump on
(match_operand:SI 1 "const_int_operand" "") ; lower bound
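A reading aid for the asm templates above (illustrative C, not part of the patch): both sequences end with the register that held the guard value or its address either scrubbed or holding only the XOR of the two values, so no usable copy of the guard survives in a register.

  /* *stack_protect_set_insn: ldr %1,[%1]; str %1,%0; mov(s) %1,#0  */
  static void
  set_model (unsigned long *canary_slot, unsigned long *guard_addr)
  {
    unsigned long tmp = *guard_addr;  /* ldr %1, [%1]              */
    *canary_slot = tmp;               /* str %1, %0                */
    tmp = 0;                          /* mov(s) %1, #0: scrub      */
    (void) tmp;
  }

  /* arm/thumb1_stack_protect_test_insn:
     ldr %0,[%2]; ldr %2,%1; eors %0,%2,%0  */
  static int
  test_model (unsigned long *canary_slot, unsigned long *guard_addr)
  {
    unsigned long g = *guard_addr;    /* ldr %0, [%2]              */
    unsigned long c = *canary_slot;   /* ldr %2, %1 (clobbers %2)  */
    return (g ^ c) == 0;              /* eors: Z set iff equal     */
  }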
diff -urpN a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
--- a/gcc/config/arm/arm-protos.h 2019-05-30 16:58:45.358508769 +0800
+++ b/gcc/config/arm/arm-protos.h 2019-05-30 11:52:58.491157149 +0800
@@ -28,7 +28,7 @@ extern enum unwind_info_type arm_except_
extern int use_return_insn (int, rtx);
extern bool use_simple_return_p (void);
extern enum reg_class arm_regno_class (int);
-extern void arm_load_pic_register (unsigned long);
+extern void arm_load_pic_register (unsigned long, rtx);
extern int arm_volatile_func (void);
extern void arm_expand_prologue (void);
extern void arm_expand_epilogue (bool);
@@ -69,7 +69,7 @@ extern int const_ok_for_dimode_op (HOST_
extern int arm_split_constant (RTX_CODE, machine_mode, rtx,
HOST_WIDE_INT, rtx, rtx, int);
extern int legitimate_pic_operand_p (rtx);
-extern rtx legitimize_pic_address (rtx, machine_mode, rtx);
+extern rtx legitimize_pic_address (rtx, machine_mode, rtx, rtx, bool);
extern rtx legitimize_tls_address (rtx, rtx);
extern bool arm_legitimate_address_p (machine_mode, rtx, bool);
extern int arm_legitimate_address_outer_p (machine_mode, rtx, RTX_CODE, int);
diff -urpN a/gcc/config/arm/predicates.md b/gcc/config/arm/predicates.md
--- a/gcc/config/arm/predicates.md 2019-05-30 16:58:45.358508769 +0800
+++ b/gcc/config/arm/predicates.md 2019-05-30 11:52:58.491157149 +0800
@@ -31,6 +31,23 @@
|| REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
})
+; Predicate for stack protector guard's address in
+; stack_protect_combined_set_insn and stack_protect_combined_test_insn patterns
+(define_predicate "guard_addr_operand"
+ (match_test "true")
+{
+ return (CONSTANT_ADDRESS_P (op)
+ || !targetm.cannot_force_const_mem (mode, op));
+})
+
+; Predicate for stack protector guard in stack_protect_combined_set and
+; stack_protect_combined_test patterns
+(define_predicate "guard_operand"
+ (match_code "mem")
+{
+ return guard_addr_operand (XEXP (op, 0), mode);
+})
+
(define_predicate "imm_for_neon_inv_logic_operand"
(match_code "const_vector")
{
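In effect, guard_addr_operand accepts exactly the addresses the splitters above can materialize without spilling; a sketch of the two accepted cases:

  /* Accepted by guard_addr_operand:
     1.  CONSTANT_ADDRESS_P (op): the guard's address is itself a
         legitimate constant address and can be used directly; or
     2.  the address may be forced into the literal pool
         (!targetm.cannot_force_const_mem), so the splitter can fetch it
         with a single PC-relative load into the scratch register, as in
         the force_const_mem branch of the splitters above.  */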
diff -urpN a/gcc/config/arm/thumb1.md b/gcc/config/arm/thumb1.md
--- a/gcc/config/arm/thumb1.md 2019-05-30 16:58:45.358508769 +0800
+++ b/gcc/config/arm/thumb1.md 2019-05-30 11:52:58.491157149 +0800
@@ -1964,4 +1964,17 @@
}"
[(set_attr "type" "mov_reg")]
)
+
+(define_insn "thumb1_stack_protect_test_insn"
+ [(set (match_operand:SI 0 "register_operand" "=&l")
+ (unspec:SI [(match_operand:SI 1 "memory_operand" "m")
+ (mem:SI (match_operand:SI 2 "register_operand" "+l"))]
+ UNSPEC_SP_TEST))
+ (clobber (match_dup 2))]
+ "TARGET_THUMB1"
+ "ldr\t%0, [%2]\;ldr\t%2, %1\;eors\t%0, %2, %0"
+ [(set_attr "length" "8")
+ (set_attr "conds" "set")
+ (set_attr "type" "multiple")]
+)
diff -urpN a/gcc/config/arm/unspecs.md b/gcc/config/arm/unspecs.md
--- a/gcc/config/arm/unspecs.md 2019-05-30 16:58:45.358508769 +0800
+++ b/gcc/config/arm/unspecs.md 2019-05-30 11:52:58.491157149 +0800
@@ -86,6 +86,9 @@
UNSPEC_PROBE_STACK ; Probe stack memory reference
UNSPEC_NONSECURE_MEM ; Represent non-secure memory in ARMv8-M with
; security extension
+ UNSPEC_SP_SET ; Represent the setting of stack protector's canary
+ UNSPEC_SP_TEST ; Represent the testing of stack protector's canary
+ ; against the guard.
])
(define_c_enum "unspec" [
diff -urpN a/gcc/doc/md.texi b/gcc/doc/md.texi
--- a/gcc/doc/md.texi 2019-05-30 16:58:45.362508769 +0800
+++ b/gcc/doc/md.texi 2019-05-30 11:52:58.491157149 +0800
@@ -6955,22 +6955,61 @@ builtins.
The get/set patterns have a single output/input operand respectively,
with @var{mode} intended to be @code{Pmode}.
+@cindex @code{stack_protect_combined_set} instruction pattern
+@item @samp{stack_protect_combined_set}
+This pattern, if defined, moves a @code{ptr_mode} value from an address
+whose declaration RTX is given in operand 1 to the memory in operand 0
+without leaving the value in a register afterward. If several
+instructions are needed by the target to perform the operation (e.g.,
+to load the address from a GOT entry, then load the @code{ptr_mode}
+value, and finally store it), it is the backend's responsibility to
+ensure no intermediate result gets spilled. This is to avoid leaking
+the value somewhere that an attacker might use to rewrite the stack
+guard slot after having clobbered it.
+
+If this pattern is not defined, then the address declaration is
+expanded first in the standard way and a @code{stack_protect_set}
+pattern is then generated to move the value from that address to the
+address in operand 0.
+
@cindex @code{stack_protect_set} instruction pattern
@item @samp{stack_protect_set}
-This pattern, if defined, moves a @code{ptr_mode} value from the memory
-in operand 1 to the memory in operand 0 without leaving the value in
-a register afterward. This is to avoid leaking the value some place
-that an attacker might use to rewrite the stack guard slot after
-having clobbered it.
+This pattern, if defined, moves a @code{ptr_mode} value from the valid
+memory location in operand 1 to the memory in operand 0 without leaving
+the value in a register afterward. This is to avoid leaking the value
+somewhere that an attacker might use to rewrite the stack guard slot
+after having clobbered it.
+
+Note: on targets whose addressing modes do not allow loading directly
+from the stack guard's address, the address is first expanded in the
+standard way, which could cause some spills.
If this pattern is not defined, then a plain move pattern is generated.
+@cindex @code{stack_protect_combined_test} instruction pattern
+@item @samp{stack_protect_combined_test}
+This pattern, if defined, compares a @code{ptr_mode} value from an
+address whose declaration RTX is given in operand 1 with the memory in
+operand 0 without leaving the value in a register afterward, and
+branches to operand 2 if the values were equal. If several
+instructions are needed by the target to perform the operation (e.g.,
+to load the address from a GOT entry, then load the @code{ptr_mode}
+value, and finally compare it), it is the backend's responsibility to
+ensure no intermediate result gets spilled. This is to avoid leaking
+the value somewhere that an attacker might use to rewrite the stack
+guard slot after having clobbered it.
+
+If this pattern is not defined, then the address declaration is
+expanded first in the standard way and a @code{stack_protect_test}
+pattern is then generated to compare the value from that address to the
+value at the memory in operand 0.
+
@cindex @code{stack_protect_test} instruction pattern
@item @samp{stack_protect_test}
This pattern, if defined, compares a @code{ptr_mode} value from the
-memory in operand 1 with the memory in operand 0 without leaving the
-value in a register afterward and branches to operand 2 if the values
-were equal.
+valid memory location in operand 1 with the memory in operand 0 without
+leaving the value in a register afterward, and branches to operand 2 if
+the values were equal.
If this pattern is not defined, then a plain compare pattern and
conditional branch pattern is used.
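Taken together, the four documented patterns implement the usual canary scheme. A minimal C model, assuming the conventional __stack_chk_guard and __stack_chk_fail symbols (illustrative; in generated code the canary lives in an anonymous stack slot, not a named local):

  extern void *__stack_chk_guard;
  extern void __stack_chk_fail (void) __attribute__ ((noreturn));

  void
  protected_function (void)
  {
    void *canary = __stack_chk_guard;   /* stack_protect_[combined_]set  */
    /* ... body; an overflowing local buffer smashes CANARY first ...    */
    if (canary != __stack_chk_guard)    /* stack_protect_[combined_]test */
      __stack_chk_fail ();
  }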
diff -urpN a/gcc/function.c b/gcc/function.c
--- a/gcc/function.c 2019-05-30 16:58:45.362508769 +0800
+++ b/gcc/function.c 2019-05-30 11:53:14.071156599 +0800
@@ -5065,18 +5065,34 @@ stack_protect_epilogue (void)
tree guard_decl = targetm.stack_protect_guard ();
rtx_code_label *label = gen_label_rtx ();
rtx x, y;
- rtx_insn *seq;
+ rtx_insn *seq = NULL;
x = expand_normal (crtl->stack_protect_guard);
- if (guard_decl)
- y = expand_normal (guard_decl);
+
+ if (targetm.have_stack_protect_combined_test () && guard_decl)
+ {
+ gcc_assert (DECL_P (guard_decl));
+ y = DECL_RTL (guard_decl);
+ /* Allow the target to compute the address of Y and compare it with X
+ without leaking Y into a register. This combined address + compare
+ pattern allows the target to prevent spilling of any intermediate
+ results by splitting it after register allocation. */
+ seq = targetm.gen_stack_protect_combined_test (x, y, label);
+ }
else
- y = const0_rtx;
+ {
+ if (guard_decl)
+ y = expand_normal (guard_decl);
+ else
+ y = const0_rtx;
+
+ /* Allow the target to compare Y with X without leaking either into
+ a register. */
+ if (targetm.have_stack_protect_test ())
+ seq = targetm.gen_stack_protect_test (x, y, label);
+ }
- /* Allow the target to compare Y with X without leaking either into
- a register. */
- if (targetm.have_stack_protect_test ()
- && ((seq = targetm.gen_stack_protect_test (x, y, label)) != NULL_RTX))
+ if (seq)
emit_insn (seq);
else
emit_cmp_and_jump_insns (x, y, EQ, NULL_RTX, ptr_mode, 1, label);
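With this restructuring the epilogue expander tries three strategies in order; summarizing the control flow of the hunk above:

  /* Selection order in stack_protect_epilogue ():
     1.  stack_protect_combined_test pattern and a known guard decl:
         emit one insn carrying the guard's DECL_RTL, split after
         register allocation so the guard address never spills;
     2.  plain stack_protect_test pattern: target-specific compare of
         the two expanded operands;
     3.  neither: fall back to emit_cmp_and_jump_insns (x, y, EQ, ...).  */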
diff -urpN a/gcc/genpreds.c b/gcc/genpreds.c
--- a/gcc/genpreds.c 2019-05-30 16:58:45.362508769 +0800
+++ b/gcc/genpreds.c 2019-05-30 11:53:14.163156595 +0800
@@ -1581,7 +1581,8 @@ write_insn_preds_c (void)
#include \"reload.h\"\n\
#include \"regs.h\"\n\
#include \"emit-rtl.h\"\n\
-#include \"tm-constrs.h\"\n");
+#include \"tm-constrs.h\"\n\
+#include \"target.h\"\n");
FOR_ALL_PREDICATES (p)
write_one_predicate_function (p);
diff -urpN a/gcc/target-insns.def b/gcc/target-insns.def
--- a/gcc/target-insns.def 2019-05-30 16:58:45.362508769 +0800
+++ b/gcc/target-insns.def 2019-05-30 11:52:58.495157149 +0800
@@ -96,7 +96,9 @@ DEF_TARGET_INSN (sibcall_value, (rtx x0,
DEF_TARGET_INSN (simple_return, (void))
DEF_TARGET_INSN (split_stack_prologue, (void))
DEF_TARGET_INSN (split_stack_space_check, (rtx x0, rtx x1))
+DEF_TARGET_INSN (stack_protect_combined_set, (rtx x0, rtx x1))
DEF_TARGET_INSN (stack_protect_set, (rtx x0, rtx x1))
+DEF_TARGET_INSN (stack_protect_combined_test, (rtx x0, rtx x1, rtx x2))
DEF_TARGET_INSN (stack_protect_test, (rtx x0, rtx x1, rtx x2))
DEF_TARGET_INSN (store_multiple, (rtx x0, rtx x1, rtx x2))
DEF_TARGET_INSN (tablejump, (rtx x0, rtx x1))
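Each DEF_TARGET_INSN entry expands into the optional-pattern hooks queried earlier in cfgexpand.c and function.c; for the two new patterns, the generated interface is conceptually (a sketch, per the standard target-insns machinery):

  /* targetm.have_stack_protect_combined_set ()            -> bool
     targetm.gen_stack_protect_combined_set (x0, x1)       -> rtx_insn *
     targetm.have_stack_protect_combined_test ()           -> bool
     targetm.gen_stack_protect_combined_test (x0, x1, x2)  -> rtx_insn *  */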