author    Vyacheslav Cherkashin <v.cherkashin@samsung.com>   2013-05-22 16:04:15 +0400
committer Vyacheslav Cherkashin <v.cherkashin@samsung.com>   2013-05-22 16:11:09 +0400
commit    857fd45f59fc89554ce217b26533c36b50ef0021 (patch)
tree      f59e2895d8ce915a87490113b8248673a32b8e32
parent    5034c90a052e9d30598eefb41288131c93d144c6 (diff)
[REFACTOR] move trampoline_probe_handler()
from src/modules/kprobe/arch/asm-arm/dbi_kprobes.c to src/modules/kprobe/dbi_kprobes.c
-rw-r--r--  kprobe/arch/asm-arm/dbi_kprobes.c   75
-rw-r--r--  kprobe/arch/asm-arm/dbi_kprobes.h    1
-rw-r--r--  kprobe/dbi_kprobes.c                74
3 files changed, 74 insertions(+), 76 deletions(-)
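
Note on the move: the only functional difference between the removed and the added copy of trampoline_probe_handler() is the point where the original return address is restored. The arch-specific version wrote regs->ARM_lr and regs->ARM_pc directly, while the generic copy in kprobe/dbi_kprobes.c calls dbi_set_ret_addr() and dbi_set_instr_ptr() so that the file stays architecture-neutral. Those accessors are not part of this diff; the sketch below is only an assumption of what the ARM definitions typically look like, and the real inline helpers in the arch headers may differ.

    /* Hedged sketch -- not taken from this commit. The actual accessors
     * live in the arch headers; struct pt_regs comes from <asm/ptrace.h>. */
    static inline void dbi_set_ret_addr(struct pt_regs *regs, unsigned long val)
    {
            regs->ARM_lr = val;     /* link register: return address on ARM */
    }

    static inline void dbi_set_instr_ptr(struct pt_regs *regs, unsigned long val)
    {
            regs->ARM_pc = val;     /* program counter */
    }
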
diff --git a/kprobe/arch/asm-arm/dbi_kprobes.c b/kprobe/arch/asm-arm/dbi_kprobes.c
index a57053a5..f4e1a753 100644
--- a/kprobe/arch/asm-arm/dbi_kprobes.c
+++ b/kprobe/arch/asm-arm/dbi_kprobes.c
@@ -404,81 +404,6 @@ void arch_disarm_kprobe(struct kprobe *p)
flush_icache_range((unsigned long)p->addr, (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head;
- struct hlist_node *node, *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
-
- struct kretprobe *crp = NULL;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- spin_lock_irqsave(&kretprobe_lock, flags);
-
- /*
- * We are using different hash keys (current and mm) for finding kernel
- * space and user space probes. Kernel space probes can change mm field in
- * task_struct. User space probes can be shared between threads of one
- * process so they have different current but same mm.
- */
- head = kretprobe_inst_table_head(current);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path
- * have a return probe installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
- if (ri->rp && ri->rp->handler) {
- ri->rp->handler(ri, regs, ri->rp->priv_arg);
- }
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri);
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- regs->ARM_lr = orig_ret_address;
- regs->ARM_pc = orig_ret_address;
-
- if (kcb->kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe(kcb);
- } else {
- reset_current_kprobe();
- }
-
- spin_unlock_irqrestore(&kretprobe_lock, flags);
-
- /*
- * By returning a non-zero value, we are telling
- * kprobe_handler() that we don't want the post_handler
- * to run (and have re-enabled preemption)
- */
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(trampoline_probe_handler);
-
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;
diff --git a/kprobe/arch/asm-arm/dbi_kprobes.h b/kprobe/arch/asm-arm/dbi_kprobes.h
index 3bd5b3ff..5e36549a 100644
--- a/kprobe/arch/asm-arm/dbi_kprobes.h
+++ b/kprobe/arch/asm-arm/dbi_kprobes.h
@@ -503,7 +503,6 @@ void arch_disarm_kprobe(struct kprobe *p);
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs);
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs);
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs);
void save_previous_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *cur_p);
void restore_previous_kprobe(struct kprobe_ctlblk *kcb);
diff --git a/kprobe/dbi_kprobes.c b/kprobe/dbi_kprobes.c
index 3b4e48fd..681c8b4d 100644
--- a/kprobe/dbi_kprobes.c
+++ b/kprobe/dbi_kprobes.c
@@ -633,6 +633,80 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
return 0;
}
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head;
+ struct hlist_node *node, *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+ struct kretprobe *crp = NULL;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ spin_lock_irqsave(&kretprobe_lock, flags);
+
+ /*
+ * We are using different hash keys (current and mm) for finding kernel
+ * space and user space probes. Kernel space probes can change mm field in
+ * task_struct. User space probes can be shared between threads of one
+ * process so they have different current but same mm.
+ */
+ head = kretprobe_inst_table_head(current);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path
+ * have a return probe installed on them, and/or more than one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+ if (ri->rp && ri->rp->handler) {
+ ri->rp->handler(ri, regs, ri->rp->priv_arg);
+ }
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri);
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ dbi_set_ret_addr(regs, orig_ret_address);
+ dbi_set_instr_ptr(regs, orig_ret_address);
+
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ } else {
+ reset_current_kprobe();
+ }
+
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
+
+ return 1;
+}
+
struct kretprobe *sched_rp;
#define SCHED_RP_NR 200
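
A side note on the walk over kretprobe_inst_table_head() in the handler above: it depends on instances being pushed at the head of the per-task list, so popping from the head and stopping at the first ret_addr that is not the trampoline recovers the correct return address even when several return probes are registered on one function. The standalone sketch below (plain user-space C, not SWAP code; all names are illustrative) models just that invariant, not the handler callbacks or locking.

    /* Standalone sketch: models the instance-list invariant that
     * trampoline_probe_handler() relies on. */
    #include <stdio.h>

    #define TRAMPOLINE 0xdeadbeefUL

    struct inst {
            unsigned long ret_addr;   /* saved LR: real address or trampoline */
            struct inst *next;
    };

    static struct inst *head;

    static void push(struct inst *i, unsigned long ret_addr)
    {
            i->ret_addr = ret_addr;
            i->next = head;
            head = i;                 /* newest instance always at the head */
    }

    static unsigned long pop_until_real(void)
    {
            unsigned long orig = 0;

            while (head) {
                    struct inst *i = head;

                    head = i->next;            /* "recycle" the instance */
                    orig = i->ret_addr;
                    if (orig != TRAMPOLINE)    /* first real address wins */
                            break;
            }
            return orig;
    }

    int main(void)
    {
            struct inst a, b;

            /* two return probes on the same function: the first saved the
             * real return address, the second saved an LR that already
             * pointed at the trampoline */
            push(&a, 0x1000UL);
            push(&b, TRAMPOLINE);

            printf("restore pc/lr to %#lx\n", pop_until_real());   /* 0x1000 */
            return 0;
    }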