| | | |
|---|---|---|
| author | Vyacheslav Cherkashin <v.cherkashin@samsung.com> | 2013-04-03 11:07:45 +0400 |
| committer | Vyacheslav Cherkashin <v.cherkashin@samsung.com> | 2013-04-03 11:07:45 +0400 |
| commit | 406edfed12048e9a1f45896a32f5acfefd5c375e (patch) | |
| tree | 93f082b5c6151a66e6739d0f66b5d5f5753c9e1d /kprobe | |
| parent | 343ad84f0223fca99075dee4c63c3d881edac986 (diff) | |
| download | swap-modules-406edfed12048e9a1f45896a32f5acfefd5c375e.tar.gz swap-modules-406edfed12048e9a1f45896a32f5acfefd5c375e.tar.bz2 swap-modules-406edfed12048e9a1f45896a32f5acfefd5c375e.zip | |
[REFACTOR] code style fix for 'dbi_kprobes.c'
src/modules/kprobe/dbi_kprobes.c
Diffstat (limited to 'kprobe')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kprobe/dbi_kprobes.c | 554 |
1 file changed, 261 insertions, 293 deletions
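The change is purely mechanical kernel coding-style cleanup: the space between a function or macro name and its opening parenthesis is dropped, opening braces move onto the statement line, and stale commented-out code (including the old `// src_kprobes.c` header line) is deleted; no logic is touched. Two representative hunks from the diff below, reflowed here for readability (original indentation approximated):

```diff
-DEFINE_SPINLOCK (kretprobe_lock);	/* Protects kretprobe_inst_table */
+DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */

-	if (cur && cur->break_handler)
-	{
-		if (cur->break_handler (cur, regs /*, vma, page, kaddr */ ))
+	if (cur && cur->break_handler) {
+		if (cur->break_handler(cur, regs))
 			ret = 1;
 	}
```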
diff --git a/kprobe/dbi_kprobes.c b/kprobe/dbi_kprobes.c index f71dddab..1a629def 100644 --- a/kprobe/dbi_kprobes.c +++ b/kprobe/dbi_kprobes.c @@ -1,5 +1,3 @@ -// src_kprobes.c - /* * Kernel Probes (KProbes) * kernel/kprobes.c @@ -69,12 +67,12 @@ extern struct hlist_head kprobe_insn_pages; -DEFINE_PER_CPU (struct kprobe *, current_kprobe) = NULL; -static DEFINE_PER_CPU (struct kprobe_ctlblk, kprobe_ctlblk); +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +static DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); -DEFINE_SPINLOCK (kretprobe_lock); /* Protects kretprobe_inst_table */ +DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */ EXPORT_SYMBOL_GPL(kretprobe_lock); -static DEFINE_PER_CPU (struct kprobe *, kprobe_instance) = NULL; +static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; @@ -82,57 +80,54 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; atomic_t kprobe_count; EXPORT_SYMBOL_GPL(kprobe_count); -void kretprobe_assert (struct kretprobe_instance *ri, unsigned long orig_ret_address, unsigned long trampoline_address) +void kretprobe_assert(struct kretprobe_instance *ri, unsigned long orig_ret_address, unsigned long trampoline_address) { if (!orig_ret_address || (orig_ret_address == trampoline_address)) { struct task_struct *task; if (ri == NULL) { - panic ("kretprobe BUG!: ri = NULL\n"); + panic("kretprobe BUG!: ri = NULL\n"); } task = ri->task; if (task == NULL) { - panic ("kretprobe BUG!: task = NULL\n"); + panic("kretprobe BUG!: task = NULL\n"); } if (ri->rp == NULL) { - panic ("kretprobe BUG!: ri->rp = NULL\n"); + panic("kretprobe BUG!: ri->rp = NULL\n"); } - panic ("kretprobe BUG!: Processing kretprobe %p @ %p (%d/%d - %s)\n", - ri->rp, ri->rp->kp.addr, ri->task->tgid, ri->task->pid, ri->task->comm); + panic("kretprobe BUG!: Processing kretprobe %p @ %p (%d/%d - %s)\n", + ri->rp, ri->rp->kp.addr, ri->task->tgid, ri->task->pid, ri->task->comm); } } - /* We have preemption disabled.. 
so it is safe to use __ versions */ -static inline -void set_kprobe_instance (struct kprobe *kp) +static inline void set_kprobe_instance(struct kprobe *kp) { - __get_cpu_var (kprobe_instance) = kp; + __get_cpu_var(kprobe_instance) = kp; } -static inline -void reset_kprobe_instance (void) +static inline void reset_kprobe_instance(void) { - __get_cpu_var (kprobe_instance) = NULL; + __get_cpu_var(kprobe_instance) = NULL; } /* kprobe_running() will just return the current_kprobe on this CPU */ -struct kprobe *kprobe_running (void) +struct kprobe *kprobe_running(void) { - return (__get_cpu_var (current_kprobe)); + return __get_cpu_var(current_kprobe); } -void reset_current_kprobe (void) +void reset_current_kprobe(void) { - __get_cpu_var (current_kprobe) = NULL; + __get_cpu_var(current_kprobe) = NULL; } -struct kprobe_ctlblk *get_kprobe_ctlblk (void) +struct kprobe_ctlblk *get_kprobe_ctlblk(void) { - return (&__get_cpu_var (kprobe_ctlblk)); + return &__get_cpu_var(kprobe_ctlblk); } /* @@ -161,123 +156,126 @@ struct kprobe *get_kprobe(void *addr) * Aggregate handlers for multiple kprobes support - these handlers * take care of invoking the individual kprobe handlers on p->list */ -static -int aggr_pre_handler (struct kprobe *p, struct pt_regs *regs) +static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe *kp; int ret; - list_for_each_entry_rcu (kp, &p->list, list) - { - if (kp->pre_handler) - { - set_kprobe_instance (kp); - ret = kp->pre_handler (kp, regs); + list_for_each_entry_rcu(kp, &p->list, list) { + if (kp->pre_handler) { + set_kprobe_instance(kp); + ret = kp->pre_handler(kp, regs); if (ret) return ret; } - reset_kprobe_instance (); + reset_kprobe_instance(); } + return 0; } -static -void aggr_post_handler (struct kprobe *p, struct pt_regs *regs, unsigned long flags) +static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, unsigned long flags) { struct kprobe *kp; - list_for_each_entry_rcu (kp, &p->list, list) - { - if (kp->post_handler) - { - set_kprobe_instance (kp); - kp->post_handler (kp, regs, flags); - reset_kprobe_instance (); + list_for_each_entry_rcu(kp, &p->list, list) { + if (kp->post_handler) { + set_kprobe_instance(kp); + kp->post_handler(kp, regs, flags); + reset_kprobe_instance(); } } } -static -int aggr_fault_handler (struct kprobe *p, struct pt_regs *regs, int trapnr) +static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr) { - struct kprobe *cur = __get_cpu_var (kprobe_instance); + struct kprobe *cur = __get_cpu_var(kprobe_instance); /* * if we faulted "during" the execution of a user specified * probe handler, invoke just that probe's fault handler */ - if (cur && cur->fault_handler) - { - if (cur->fault_handler (cur, regs, trapnr)) + if (cur && cur->fault_handler) { + if (cur->fault_handler(cur, regs, trapnr)) return 1; } + return 0; } -static -int aggr_break_handler (struct kprobe *p, struct pt_regs *regs) +static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs) { - struct kprobe *cur = __get_cpu_var (kprobe_instance); + struct kprobe *cur = __get_cpu_var(kprobe_instance); int ret = 0; DBPRINTF ("cur = 0x%p\n", cur); if (cur) DBPRINTF ("cur = 0x%p cur->break_handler = 0x%p\n", cur, cur->break_handler); - if (cur && cur->break_handler) - { - if (cur->break_handler (cur, regs /*, vma, page, kaddr */ )) + if (cur && cur->break_handler) { + if (cur->break_handler(cur, regs)) ret = 1; } - reset_kprobe_instance (); + reset_kprobe_instance(); + return ret; } /* Walks the list and increments 
nmissed count for multiprobe case */ -void kprobes_inc_nmissed_count (struct kprobe *p) +void kprobes_inc_nmissed_count(struct kprobe *p) { struct kprobe *kp; - if (p->pre_handler != aggr_pre_handler) - { + if (p->pre_handler != aggr_pre_handler) { p->nmissed++; + } else { + list_for_each_entry_rcu(kp, &p->list, list) { + ++kp->nmissed; + } } - else - { - list_for_each_entry_rcu (kp, &p->list, list) kp->nmissed++; - } - return; } /* Called with kretprobe_lock held */ -struct kretprobe_instance *get_free_rp_inst (struct kretprobe *rp) +struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp) { struct hlist_node *node; struct kretprobe_instance *ri; - hlist_for_each_entry (ri, node, &rp->free_instances, uflist) + + hlist_for_each_entry(ri, node, &rp->free_instances, uflist) { return ri; - if(!alloc_nodes_kretprobe(rp)){ - hlist_for_each_entry (ri, node, &rp->free_instances, uflist) - return ri; } + + if (!alloc_nodes_kretprobe(rp)) { + hlist_for_each_entry(ri, node, &rp->free_instances, uflist) { + return ri; + } + } + return NULL; } EXPORT_SYMBOL_GPL(get_free_rp_inst); /* Called with kretprobe_lock held */ -struct kretprobe_instance *get_free_rp_inst_no_alloc (struct kretprobe *rp) +struct kretprobe_instance *get_free_rp_inst_no_alloc(struct kretprobe *rp) { struct hlist_node *node; struct kretprobe_instance *ri; - hlist_for_each_entry (ri, node, &rp->free_instances, uflist) + + hlist_for_each_entry(ri, node, &rp->free_instances, uflist) { return ri; + } + return NULL; } /* Called with kretprobe_lock held */ -struct kretprobe_instance *get_used_rp_inst (struct kretprobe *rp) +struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp) { struct hlist_node *node; struct kretprobe_instance *ri; - hlist_for_each_entry (ri, node, &rp->used_instances, uflist) return ri; + + hlist_for_each_entry(ri, node, &rp->used_instances, uflist) { + return ri; + } + return NULL; } EXPORT_SYMBOL_GPL(get_used_rp_inst); @@ -289,52 +287,50 @@ void add_rp_inst (struct kretprobe_instance *ri) * Remove rp inst off the free list - * Add it back when probed function returns */ - hlist_del (&ri->uflist); + hlist_del(&ri->uflist); /* Add rp inst onto table */ - INIT_HLIST_NODE (&ri->hlist); + INIT_HLIST_NODE(&ri->hlist); hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]); /* Also add this rp inst to the used list. */ - INIT_HLIST_NODE (&ri->uflist); - hlist_add_head (&ri->uflist, &ri->rp->used_instances); + INIT_HLIST_NODE(&ri->uflist); + hlist_add_head(&ri->uflist, &ri->rp->used_instances); } EXPORT_SYMBOL_GPL(add_rp_inst); /* Called with kretprobe_lock held */ -void recycle_rp_inst (struct kretprobe_instance *ri) +void recycle_rp_inst(struct kretprobe_instance *ri) { - if (ri->rp) - { - hlist_del (&ri->hlist); + if (ri->rp) { + hlist_del(&ri->hlist); /* remove rp inst off the used list */ - hlist_del (&ri->uflist); + hlist_del(&ri->uflist); /* put rp inst back onto the free list */ - INIT_HLIST_NODE (&ri->uflist); - hlist_add_head (&ri->uflist, &ri->rp->free_instances); + INIT_HLIST_NODE(&ri->uflist); + hlist_add_head(&ri->uflist, &ri->rp->free_instances); } else if (!ri->rp2) { /* * This is __switch_to retprobe instance. It has neither rp nor rp2. 
*/ - hlist_del (&ri->hlist); + hlist_del(&ri->hlist); } } EXPORT_SYMBOL_GPL(recycle_rp_inst); -struct hlist_head * kretprobe_inst_table_head (void *hash_key) +struct hlist_head *kretprobe_inst_table_head(void *hash_key) { - return &kretprobe_inst_table[hash_ptr (hash_key, KPROBE_HASH_BITS)]; + return &kretprobe_inst_table[hash_ptr(hash_key, KPROBE_HASH_BITS)]; } EXPORT_SYMBOL_GPL(kretprobe_inst_table_head); -void free_rp_inst (struct kretprobe *rp) +void free_rp_inst(struct kretprobe *rp) { struct kretprobe_instance *ri; - while ((ri = get_free_rp_inst_no_alloc (rp)) != NULL) - { - hlist_del (&ri->uflist); - kfree (ri); + while ((ri = get_free_rp_inst_no_alloc(rp)) != NULL) { + hlist_del(&ri->uflist); + kfree(ri); } } EXPORT_SYMBOL_GPL(free_rp_inst); @@ -342,36 +338,38 @@ EXPORT_SYMBOL_GPL(free_rp_inst); /* * Keep all fields in the kprobe consistent */ -static inline -void copy_kprobe (struct kprobe *old_p, struct kprobe *p) +static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p) { - memcpy (&p->opcode, &old_p->opcode, sizeof (kprobe_opcode_t)); - memcpy (&p->ainsn, &old_p->ainsn, sizeof (struct arch_specific_insn)); + memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t)); + memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn)); p->ss_addr = old_p->ss_addr; #ifdef CONFIG_ARM p->safe_arm = old_p->safe_arm; p->safe_thumb = old_p->safe_thumb; #endif - //p->spid = old_p->spid; } /* * Add the new probe to old_p->list. Fail if this is the * second jprobe at the address - two jprobes can't coexist */ -static int add_new_kprobe (struct kprobe *old_p, struct kprobe *p) +static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p) { - if (p->break_handler) - { - if (old_p->break_handler) + if (p->break_handler) { + if (old_p->break_handler) { return -EEXIST; - list_add_tail_rcu (&p->list, &old_p->list); + } + + list_add_tail_rcu(&p->list, &old_p->list); old_p->break_handler = aggr_break_handler; + } else { + list_add_rcu(&p->list, &old_p->list); } - else - list_add_rcu (&p->list, &old_p->list); - if (p->post_handler && !old_p->post_handler) + + if (p->post_handler && !old_p->post_handler) { old_p->post_handler = aggr_post_handler; + } + return 0; } @@ -382,13 +380,13 @@ static int add_new_kprobe (struct kprobe *old_p, struct kprobe *p) * * The @old entry will be replaced with the @new entry atomically. */ -inline void dbi_hlist_replace_rcu (struct hlist_node *old, struct hlist_node *new) +inline void dbi_hlist_replace_rcu(struct hlist_node *old, struct hlist_node *new) { struct hlist_node *next = old->next; new->next = next; new->pprev = old->pprev; - smp_wmb (); + smp_wmb(); if (next) new->next->pprev = &new->next; if (new->pprev) @@ -396,16 +394,13 @@ inline void dbi_hlist_replace_rcu (struct hlist_node *old, struct hlist_node *ne old->pprev = LIST_POISON2; } - /* * Fill in the required fields of the "manager kprobe". 
Replace the * earlier kprobe in the hlist with the manager kprobe */ -static inline -void add_aggr_kprobe (struct kprobe *ap, struct kprobe *p) +static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) { - copy_kprobe (p, ap); - //flush_insn_slot (ap); + copy_kprobe(p, ap); ap->addr = p->addr; ap->pre_handler = aggr_pre_handler; ap->fault_handler = aggr_fault_handler; @@ -414,105 +409,100 @@ void add_aggr_kprobe (struct kprobe *ap, struct kprobe *p) if (p->break_handler) ap->break_handler = aggr_break_handler; - INIT_LIST_HEAD (&ap->list); - list_add_rcu (&p->list, &ap->list); + INIT_LIST_HEAD(&ap->list); + list_add_rcu(&p->list, &ap->list); - dbi_hlist_replace_rcu (&p->hlist, &ap->hlist); + dbi_hlist_replace_rcu(&p->hlist, &ap->hlist); } /* * This is the second or subsequent kprobe at the address - handle * the intricacies */ -int register_aggr_kprobe (struct kprobe *old_p, struct kprobe *p) +int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p) { int ret = 0; struct kprobe *ap; DBPRINTF ("start\n"); DBPRINTF ("p = %p old_p = %p \n", p, old_p); - if (old_p->pre_handler == aggr_pre_handler) - { + if (old_p->pre_handler == aggr_pre_handler) { DBPRINTF ("aggr_pre_handler \n"); - copy_kprobe (old_p, p); - ret = add_new_kprobe (old_p, p); - } - else - { + copy_kprobe(old_p, p); + ret = add_new_kprobe(old_p, p); + } else { DBPRINTF ("kzalloc\n"); - #ifdef kzalloc - ap = kzalloc (sizeof (struct kprobe), GFP_KERNEL); + ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL); #else - ap = kmalloc (sizeof (struct kprobe), GFP_KERNEL); + ap = kmalloc(sizeof(struct kprobe), GFP_KERNEL); if (ap) - memset (ap, 0, sizeof (struct kprobe)); + memset(ap, 0, sizeof(struct kprobe)); #endif if (!ap) return -ENOMEM; - add_aggr_kprobe (ap, old_p); - copy_kprobe (ap, p); + add_aggr_kprobe(ap, old_p); + copy_kprobe(ap, p); DBPRINTF ("ap = %p p = %p old_p = %p \n", ap, p, old_p); - ret = add_new_kprobe (ap, p); + ret = add_new_kprobe(ap, p); } + return ret; } EXPORT_SYMBOL_GPL(register_aggr_kprobe); -int dbi_register_kprobe (struct kprobe *p) -{ - struct kprobe *old_p; - int ret = 0; - /* - * If we have a symbol_name argument look it up, - * and add it to the address. That way the addr - * field can either be global or relative to a symbol. - */ - if (p->symbol_name) - { - if (p->addr) - return -EINVAL; - p->addr = (kprobe_opcode_t *)swap_ksyms(p->symbol_name); - } - - if (!p->addr) - return -EINVAL; - DBPRINTF ("p->addr = 0x%p\n", p->addr); - p->addr = (kprobe_opcode_t *) (((char *) p->addr) + p->offset); - DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p); +int dbi_register_kprobe(struct kprobe *p) +{ + struct kprobe *old_p; + int ret = 0; + /* + * If we have a symbol_name argument look it up, + * and add it to the address. That way the addr + * field can either be global or relative to a symbol. 
+ */ + if (p->symbol_name) { + if (p->addr) + return -EINVAL; + p->addr = (kprobe_opcode_t *)swap_ksyms(p->symbol_name); + } + + if (!p->addr) + return -EINVAL; + DBPRINTF ("p->addr = 0x%p\n", p->addr); + p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset); + DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p); #ifdef KPROBES_PROFILE - p->start_tm.tv_sec = p->start_tm.tv_usec = 0; - p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0; - p->count = 0; + p->start_tm.tv_sec = p->start_tm.tv_usec = 0; + p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0; + p->count = 0; #endif - p->mod_refcounted = 0; - p->nmissed = 0; - - old_p = get_kprobe(p->addr); - if (old_p) - { - ret = register_aggr_kprobe (old_p, p); - if (!ret) - atomic_inc (&kprobe_count); - goto out; - } - - if ((ret = arch_prepare_kprobe (p)) != 0) - goto out; - - DBPRINTF ("before out ret = 0x%x\n", ret); - INIT_HLIST_NODE (&p->hlist); - hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]); - arch_arm_kprobe (p); + p->mod_refcounted = 0; + p->nmissed = 0; + + old_p = get_kprobe(p->addr); + if (old_p) { + ret = register_aggr_kprobe(old_p, p); + if (!ret) + atomic_inc(&kprobe_count); + goto out; + } + + if ((ret = arch_prepare_kprobe(p)) != 0) + goto out; + + DBPRINTF ("before out ret = 0x%x\n", ret); + INIT_HLIST_NODE(&p->hlist); + hlist_add_head_rcu(&p->hlist, &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); + arch_arm_kprobe(p); out: - DBPRINTF ("out ret = 0x%x\n", ret); - return ret; + DBPRINTF ("out ret = 0x%x\n", ret); + return ret; } -void dbi_unregister_kprobe (struct kprobe *p, struct task_struct *task) +void dbi_unregister_kprobe(struct kprobe *p, struct task_struct *task) { struct kprobe *old_p, *list_p; int cleanup_p; @@ -522,9 +512,8 @@ void dbi_unregister_kprobe (struct kprobe *p, struct task_struct *task) if (unlikely (!old_p)) return; - if (p != old_p) - { - list_for_each_entry_rcu (list_p, &old_p->list, list) + if (p != old_p) { + list_for_each_entry_rcu(list_p, &old_p->list, list) if (list_p == p) /* kprobe p is a valid probe */ goto valid_p; @@ -534,26 +523,21 @@ void dbi_unregister_kprobe (struct kprobe *p, struct task_struct *task) valid_p: DBPRINTF ("dbi_unregister_kprobe valid_p"); if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) && - (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) - { + (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) { /* Only probe on the hash list */ arch_disarm_kprobe(p); - hlist_del_rcu (&old_p->hlist); + hlist_del_rcu(&old_p->hlist); cleanup_p = 1; - } - else - { - list_del_rcu (&p->list); + } else { + list_del_rcu(&p->list); cleanup_p = 0; } DBPRINTF ("dbi_unregister_kprobe cleanup_p=%d", cleanup_p); - if (cleanup_p) - { - if (p != old_p) - { - list_del_rcu (&p->list); - kfree (old_p); + if (cleanup_p) { + if (p != old_p) { + list_del_rcu(&p->list); + kfree(old_p); } if (!in_atomic()) { @@ -561,56 +545,52 @@ valid_p: } arch_remove_kprobe(p); - } - else - { + } else { if (p->break_handler) old_p->break_handler = NULL; - if (p->post_handler) - { - list_for_each_entry_rcu (list_p, &old_p->list, list) - { - if (list_p->post_handler) - { + if (p->post_handler) { + list_for_each_entry_rcu(list_p, &old_p->list, list) { + if (list_p->post_handler) { cleanup_p = 2; break; } } + if (cleanup_p == 0) old_p->post_handler = NULL; } } } -int dbi_register_jprobe (struct jprobe *jp) +int dbi_register_jprobe(struct jprobe *jp) { /* Todo: Verify probepoint is a function entry point */ jp->kp.pre_handler = 
setjmp_pre_handler; jp->kp.break_handler = longjmp_break_handler; - return dbi_register_kprobe (&jp->kp); + return dbi_register_kprobe(&jp->kp); } -void dbi_unregister_jprobe (struct jprobe *jp) +void dbi_unregister_jprobe(struct jprobe *jp) { - dbi_unregister_kprobe (&jp->kp, NULL); + dbi_unregister_kprobe(&jp->kp, NULL); } /* * This kprobe pre_handler is registered with every kretprobe. When probe * hits it will set up the return probe. */ -int pre_handler_kretprobe (struct kprobe *p, struct pt_regs *regs) +int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) { - struct kretprobe *rp = container_of (p, struct kretprobe, kp); + struct kretprobe *rp = container_of(p, struct kretprobe, kp); unsigned long flags = 0; DBPRINTF ("START\n"); /*TODO: consider to only swap the RA after the last pre_handler fired */ - spin_lock_irqsave (&kretprobe_lock, flags); + spin_lock_irqsave(&kretprobe_lock, flags); if (!rp->disarm) - __arch_prepare_kretprobe (rp, regs); - spin_unlock_irqrestore (&kretprobe_lock, flags); + __arch_prepare_kretprobe(rp, regs); + spin_unlock_irqrestore(&kretprobe_lock, flags); DBPRINTF ("END\n"); return 0; } @@ -623,44 +603,39 @@ struct kretprobe *sched_rp; int alloc_nodes_kretprobe(struct kretprobe *rp) { - int alloc_nodes; - struct kretprobe_instance *inst; - int i; + int alloc_nodes; + struct kretprobe_instance *inst; + int i; - DBPRINTF("Alloc aditional mem for retprobes"); + DBPRINTF("Alloc aditional mem for retprobes"); - if ((unsigned long)rp->kp.addr == sched_addr){ - rp->maxactive += SCHED_RP_NR;//max (100, 2 * NR_CPUS); - alloc_nodes = SCHED_RP_NR; - } - else - { + if ((unsigned long)rp->kp.addr == sched_addr) { + rp->maxactive += SCHED_RP_NR;//max (100, 2 * NR_CPUS); + alloc_nodes = SCHED_RP_NR; + } else { #if 1//def CONFIG_PREEMPT - rp->maxactive += max (COMMON_RP_NR, 2 * NR_CPUS); + rp->maxactive += max (COMMON_RP_NR, 2 * NR_CPUS); #else - rp->maxacpptive += NR_CPUS; + rp->maxacpptive += NR_CPUS; #endif - alloc_nodes = COMMON_RP_NR; - } - /* INIT_HLIST_HEAD (&rp->used_instances); */ - /* INIT_HLIST_HEAD (&rp->free_instances); */ - for (i = 0; i < alloc_nodes; i++) - { - inst = kmalloc (sizeof (struct kretprobe_instance), GFP_ATOMIC); - if (inst == NULL) - { - free_rp_inst (rp); - return -ENOMEM; - } - INIT_HLIST_NODE (&inst->uflist); - hlist_add_head (&inst->uflist, &rp->free_instances); - } - - DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2))); - return 0; -} - -int dbi_register_kretprobe (struct kretprobe *rp) + alloc_nodes = COMMON_RP_NR; + } + + for (i = 0; i < alloc_nodes; i++) { + inst = kmalloc(sizeof(struct kretprobe_instance), GFP_ATOMIC); + if (inst == NULL) { + free_rp_inst(rp); + return -ENOMEM; + } + INIT_HLIST_NODE(&inst->uflist); + hlist_add_head(&inst->uflist, &rp->free_instances); + } + + DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2))); + return 0; +} + +int dbi_register_kretprobe(struct kretprobe *rp) { int ret = 0; struct kretprobe_instance *inst; @@ -688,25 +663,23 @@ int dbi_register_kretprobe (struct kretprobe *rp) rp->maxactive = NR_CPUS; #endif } - INIT_HLIST_HEAD (&rp->used_instances); - INIT_HLIST_HEAD (&rp->free_instances); - for (i = 0; i < rp->maxactive; i++) - { - inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL); - if (inst == NULL) - { - free_rp_inst (rp); + 
INIT_HLIST_HEAD(&rp->used_instances); + INIT_HLIST_HEAD(&rp->free_instances); + for (i = 0; i < rp->maxactive; i++) { + inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL); + if (inst == NULL) { + free_rp_inst(rp); return -ENOMEM; } - INIT_HLIST_NODE (&inst->uflist); - hlist_add_head (&inst->uflist, &rp->free_instances); + INIT_HLIST_NODE(&inst->uflist); + hlist_add_head(&inst->uflist, &rp->free_instances); } DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2))); rp->nmissed = 0; /* Establish function entry probe point */ - if ((ret = dbi_register_kprobe (&rp->kp)) != 0) - free_rp_inst (rp); + if ((ret = dbi_register_kprobe(&rp->kp)) != 0) + free_rp_inst(rp); DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2))); if ((unsigned long)rp->kp.addr == sched_addr) { @@ -718,20 +691,20 @@ int dbi_register_kretprobe (struct kretprobe *rp) static int dbi_disarm_krp_inst(struct kretprobe_instance *ri); -void dbi_unregister_kretprobe (struct kretprobe *rp) +void dbi_unregister_kretprobe(struct kretprobe *rp) { unsigned long flags; struct kretprobe_instance *ri; - dbi_unregister_kprobe (&rp->kp, NULL); + dbi_unregister_kprobe(&rp->kp, NULL); /* No race here */ - spin_lock_irqsave (&kretprobe_lock, flags); + spin_lock_irqsave(&kretprobe_lock, flags); if ((unsigned long)rp->kp.addr == sched_addr) sched_rp = NULL; - while ((ri = get_used_rp_inst (rp)) != NULL) { + while ((ri = get_used_rp_inst(rp)) != NULL) { if (dbi_disarm_krp_inst(ri) == 0) recycle_rp_inst(ri); else @@ -740,37 +713,34 @@ void dbi_unregister_kretprobe (struct kretprobe *rp) (unsigned long)rp->kp.addr); } - spin_unlock_irqrestore (&kretprobe_lock, flags); - free_rp_inst (rp); + spin_unlock_irqrestore(&kretprobe_lock, flags); + free_rp_inst(rp); } -struct kretprobe * clone_kretprobe (struct kretprobe *rp) +struct kretprobe *clone_kretprobe(struct kretprobe *rp) { struct kprobe *old_p; struct kretprobe *clone = NULL; int ret; - clone = kmalloc (sizeof (struct kretprobe), GFP_KERNEL); - if (!clone) - { + clone = kmalloc(sizeof(struct kretprobe), GFP_KERNEL); + if (!clone) { DBPRINTF ("failed to alloc memory for clone probe %p!", rp->kp.addr); return NULL; } - memcpy (clone, rp, sizeof (struct kretprobe)); + memcpy(clone, rp, sizeof(struct kretprobe)); clone->kp.pre_handler = pre_handler_kretprobe; clone->kp.post_handler = NULL; clone->kp.fault_handler = NULL; clone->kp.break_handler = NULL; old_p = get_kprobe(rp->kp.addr); - if (old_p) - { - ret = register_aggr_kprobe (old_p, &clone->kp); - if (ret) - { - kfree (clone); + if (old_p) { + ret = register_aggr_kprobe(old_p, &clone->kp); + if (ret) { + kfree(clone); return NULL; } - atomic_inc (&kprobe_count); + atomic_inc(&kprobe_count); } return clone; @@ -870,41 +840,39 @@ int patch_suspended_task(struct kretprobe *rp, struct task_struct *task) return 0; } -static int __init init_kprobes (void) +static int __init init_kprobes(void) { int i, err = 0; /* FIXME allocate the probe table, currently defined statically */ /* initialize all list heads */ - for (i = 0; i < KPROBE_TABLE_SIZE; i++) - { - INIT_HLIST_HEAD (&kprobe_table[i]); - INIT_HLIST_HEAD (&kretprobe_inst_table[i]); + for (i = 0; i < KPROBE_TABLE_SIZE; ++i) { + INIT_HLIST_HEAD(&kprobe_table[i]); + INIT_HLIST_HEAD(&kretprobe_inst_table[i]); } - atomic_set (&kprobe_count, 0); + atomic_set(&kprobe_count, 0); - 
err = arch_init_kprobes (); + err = arch_init_kprobes(); DBPRINTF ("init_kprobes: arch_init_kprobes - %d", err); return err; } -static void __exit exit_kprobes (void) +static void __exit exit_kprobes(void) { - dbi_arch_exit_kprobes (); + dbi_arch_exit_kprobes(); } -module_init (init_kprobes); -module_exit (exit_kprobes); - -EXPORT_SYMBOL_GPL (dbi_register_kprobe); -EXPORT_SYMBOL_GPL (dbi_unregister_kprobe); -EXPORT_SYMBOL_GPL (dbi_register_jprobe); -EXPORT_SYMBOL_GPL (dbi_unregister_jprobe); -EXPORT_SYMBOL_GPL (dbi_jprobe_return); -EXPORT_SYMBOL_GPL (dbi_register_kretprobe); -EXPORT_SYMBOL_GPL (dbi_unregister_kretprobe); +module_init(init_kprobes); +module_exit(exit_kprobes); -MODULE_LICENSE ("Dual BSD/GPL"); +EXPORT_SYMBOL_GPL(dbi_register_kprobe); +EXPORT_SYMBOL_GPL(dbi_unregister_kprobe); +EXPORT_SYMBOL_GPL(dbi_register_jprobe); +EXPORT_SYMBOL_GPL(dbi_unregister_jprobe); +EXPORT_SYMBOL_GPL(dbi_jprobe_return); +EXPORT_SYMBOL_GPL(dbi_register_kretprobe); +EXPORT_SYMBOL_GPL(dbi_unregister_kretprobe); +MODULE_LICENSE("Dual BSD/GPL"); |