author    David S. Miller <davem@sunset.davemloft.net>  2006-12-10 02:42:03 -0800
committer David S. Miller <davem@sunset.davemloft.net>  2006-12-10 02:42:03 -0800
commit    f0882589666440d573f657cb3a1d5f66f3caa157 (patch)
tree      b2c7d133a0c6140950e928e592cf1f3cf867d4d2
parent    2f149228bb30ea08bfde740178f832d5c9081005 (diff)
[SPARC64]: Fix several kprobes bugs.
- relbranch_fixup(), for non-branches, would end up setting regs->tnpc
  incorrectly; in fact it would set it equal to regs->tpc, which would
  cause that instruction to execute twice.

  Also, if this is not a PC-relative branch, we should just leave
  regs->tnpc as-is.  This covers cases like 'jmpl' which branch to
  absolute values.

- To be absolutely 100% safe, we need to flush the instruction cache
  for all assignments to kprobe->ainsn.insn[], including cases like
  add_aggr_kprobe().

- prev_kprobe's status field needs to be 'unsigned long' to match the
  type of the value it is saving.

- jprobes were totally broken:

  = jprobe_return() can run in the stack frame of the jprobe handler,
    or in an even deeper stack frame, thus we'll be in a different
    register window than the one from the original probe state.  So
    unwind using 'restore' instructions, if necessary, right before we
    do the jprobe_return() breakpoint trap.

  = There is no reason to save/restore the register window saved at
    %sp at jprobe trigger time.  Those registers cannot be modified by
    the jprobe handler.  Also, this code was saving and restoring
    "sizeof (struct sparc_stackf)" bytes.  Depending upon the caller,
    this could clobber unrelated stack frame pieces if there is only a
    basic 128-byte register window stored on the stack, without the
    argument save area.  So just saving and restoring struct pt_regs
    is sufficient.

  = Kill "jprobe_saved_esp", it is totally unused.  Also, delete
    "jprobe_saved_regs_location"; with the stack frame unwind now done
    explicitly by jprobe_return(), this check is superfluous.

Signed-off-by: David S. Miller <davem@davemloft.net>
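For the PC-relative case, the fix boils down to carrying the displacement computed while single-stepping the copied instruction back over to the original probe address. A minimal sketch of that arithmetic, with purely hypothetical addresses and displacement (not part of the patch):

```c
/* Hypothetical illustration of the PC-relative fixup.
 *
 * The probed branch originally lived at real_pc and encodes a displacement
 * disp.  We single-step a copy of it at ainsn_addr, so the hardware leaves
 * regs->tnpc = ainsn_addr + disp.  Subtracting ainsn_addr and adding real_pc
 * recovers the destination the branch would have reached in place.
 */
static unsigned long fixup_example(void)
{
	unsigned long real_pc    = 0x1000;	/* assumed address of the probed branch */
	unsigned long ainsn_addr = 0x9000;	/* assumed address of the stepped copy  */
	unsigned long disp       = 0x40;	/* assumed branch displacement          */
	unsigned long tnpc = ainsn_addr + disp;	/* what single-stepping produced        */

	return real_pc + (tnpc - ainsn_addr);	/* == real_pc + disp == 0x1040          */
}
```

For a non-PC-relative transfer such as 'jmpl', the stepped copy already computed the correct absolute destination, so regs->tnpc is simply left alone.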
-rw-r--r--  arch/sparc64/kernel/kprobes.c | 91
-rw-r--r--  include/asm-sparc64/kprobes.h | 11
2 files changed, 49 insertions(+), 53 deletions(-)
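The I-cache flush requirement is the same everywhere an opcode is written into an execution slot: the store must be made visible to instruction fetch before the slot is single-stepped. The first hunk below adds exactly this pattern to arch_prepare_kprobe(); a standalone sketch of the idea, assuming the sparc64 flushi() primitive from <asm/cacheflush.h> and a hypothetical helper name:

```c
#include <linux/types.h>
#include <asm/cacheflush.h>

/* Illustrative sketch, not part of the patch: write a relocated opcode and
 * the trailing breakpoint into an execution slot, flushing the I-cache after
 * each store so a stale instruction can never be fetched during the
 * single-step.  The slot layout mirrors ainsn.insn[].
 */
static void install_slot(u32 *slot, u32 opcode, u32 trap_insn)
{
	slot[0] = opcode;	/* relocated copy of the probed instruction */
	flushi(&slot[0]);

	slot[1] = trap_insn;	/* breakpoint ending the single-step        */
	flushi(&slot[1]);
}
```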
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index 8e75ed762fd..ae221f0d4a6 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -45,7 +45,11 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
p->ainsn.insn[0] = *p->addr;
+ flushi(&p->ainsn.insn[0]);
+
p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
+ flushi(&p->ainsn.insn[1]);
+
p->opcode = *p->addr;
return 0;
}
@@ -185,16 +189,19 @@ no_kprobe:
/* If INSN is a relative control transfer instruction,
* return the corrected branch destination value.
*
- * The original INSN location was REAL_PC, it actually
- * executed at PC and produced destination address NPC.
+ * regs->tpc and regs->tnpc still hold the values of the
+ * program counters at the time of trap due to the execution
+ * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1]
+ *
*/
-static unsigned long __kprobes relbranch_fixup(u32 insn, unsigned long real_pc,
- unsigned long pc,
- unsigned long npc)
+static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
+ struct pt_regs *regs)
{
+ unsigned long real_pc = (unsigned long) p->addr;
+
/* Branch not taken, no mods necessary. */
- if (npc == pc + 0x4UL)
- return real_pc + 0x4UL;
+ if (regs->tnpc == regs->tpc + 0x4UL)
+ return real_pc + 0x8UL;
/* The three cases are call, branch w/prediction,
* and traditional branch.
@@ -202,14 +209,21 @@ static unsigned long __kprobes relbranch_fixup(u32 insn, unsigned long real_pc,
if ((insn & 0xc0000000) == 0x40000000 ||
(insn & 0xc1c00000) == 0x00400000 ||
(insn & 0xc1c00000) == 0x00800000) {
+ unsigned long ainsn_addr;
+
+ ainsn_addr = (unsigned long) &p->ainsn.insn[0];
+
/* The instruction did all the work for us
* already, just apply the offset to the correct
* instruction location.
*/
- return (real_pc + (npc - pc));
+ return (real_pc + (regs->tnpc - ainsn_addr));
}
- return real_pc + 0x4UL;
+ /* It is jmpl or some other absolute PC modification instruction,
+ * leave NPC as-is.
+ */
+ return regs->tnpc;
}
/* If INSN is an instruction which writes it's PC location
@@ -220,12 +234,12 @@ static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
{
unsigned long *slot = NULL;
- /* Simplest cast is call, which always uses %o7 */
+ /* Simplest case is 'call', which always uses %o7 */
if ((insn & 0xc0000000) == 0x40000000) {
slot = &regs->u_regs[UREG_I7];
}
- /* Jmpl encodes the register inside of the opcode */
+ /* 'jmpl' encodes the register inside of the opcode */
if ((insn & 0xc1f80000) == 0x81c00000) {
unsigned long rd = ((insn >> 25) & 0x1f);
@@ -247,11 +261,11 @@ static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
/*
* Called after single-stepping. p->addr is the address of the
- * instruction whose first byte has been replaced by the breakpoint
+ * instruction which has been replaced by the breakpoint
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
- * copy is p->ainsn.insn.
+ * copy is &p->ainsn.insn[0].
*
* This function prepares to return from the post-single-step
* breakpoint trap.
@@ -261,11 +275,11 @@ static void __kprobes resume_execution(struct kprobe *p,
{
u32 insn = p->ainsn.insn[0];
+ regs->tnpc = relbranch_fixup(insn, p, regs);
+
+ /* This assignment must occur after relbranch_fixup() */
regs->tpc = kcb->kprobe_orig_tnpc;
- regs->tnpc = relbranch_fixup(insn,
- (unsigned long) p->addr,
- (unsigned long) &p->ainsn.insn[0],
- regs->tnpc);
+
retpc_fixup(regs, insn, (unsigned long) p->addr);
regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
@@ -430,17 +444,8 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- kcb->jprobe_saved_regs_location = regs;
memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
- /* Save a whole stack frame, this gets arguments
- * pushed onto the stack after using up all the
- * arg registers.
- */
- memcpy(&(kcb->jprobe_saved_stack),
- (char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
- sizeof(kcb->jprobe_saved_stack));
-
regs->tpc = (unsigned long) jp->entry;
regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
regs->tstate |= TSTATE_PIL;
@@ -450,10 +455,19 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
void __kprobes jprobe_return(void)
{
- __asm__ __volatile__(
- ".globl jprobe_return_trap_instruction\n"
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ register unsigned long orig_fp asm("g1");
+
+ orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
+ __asm__ __volatile__("\n"
+"1: cmp %%sp, %0\n\t"
+ "blu,a,pt %%xcc, 1b\n\t"
+ " restore\n\t"
+ ".globl jprobe_return_trap_instruction\n"
"jprobe_return_trap_instruction:\n\t"
- "ta 0x70");
+ "ta 0x70"
+ : /* no outputs */
+ : "r" (orig_fp));
}
extern void jprobe_return_trap_instruction(void);
@@ -466,26 +480,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (addr == (u32 *) jprobe_return_trap_instruction) {
- if (kcb->jprobe_saved_regs_location != regs) {
- printk("JPROBE: Current regs (%p) does not match "
- "saved regs (%p).\n",
- regs, kcb->jprobe_saved_regs_location);
- printk("JPROBE: Saved registers\n");
- __show_regs(kcb->jprobe_saved_regs_location);
- printk("JPROBE: Current registers\n");
- __show_regs(regs);
- BUG();
- }
- /* Restore old register state. Do pt_regs
- * first so that UREG_FP is the original one for
- * the stack frame restore.
- */
memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
-
- memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
- &(kcb->jprobe_saved_stack),
- sizeof(kcb->jprobe_saved_stack));
-
preempt_enable_no_resched();
return 1;
}
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index c9f5c34d318..becc38fa06c 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -13,7 +13,11 @@ typedef u32 kprobe_opcode_t;
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
#define arch_remove_kprobe(p) do {} while (0)
#define ARCH_INACTIVE_KPROBE_COUNT 0
-#define flush_insn_slot(p) do { } while (0)
+
+#define flush_insn_slot(p) \
+do { flushi(&(p)->ainsn.insn[0]); \
+ flushi(&(p)->ainsn.insn[1]); \
+} while (0)
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
@@ -23,7 +27,7 @@ struct arch_specific_insn {
struct prev_kprobe {
struct kprobe *kp;
- unsigned int status;
+ unsigned long status;
unsigned long orig_tnpc;
unsigned long orig_tstate_pil;
};
@@ -33,10 +37,7 @@ struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_orig_tnpc;
unsigned long kprobe_orig_tstate_pil;
- long *jprobe_saved_esp;
struct pt_regs jprobe_saved_regs;
- struct pt_regs *jprobe_saved_regs_location;
- struct sparc_stackf jprobe_saved_stack;
struct prev_kprobe prev_kprobe;
};