Commit 62c27be0 authored by bibo,mao, committed by Linus Torvalds

[PATCH] kprobe whitespace cleanup

Whitespace was being used for indentation; this patch cleans up the affected
lines to follow the kernel coding style.
Signed-off-by: bibo, mao <bibo.mao@intel.com>
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 09b18203
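For reference, Documentation/CodingStyle indents with tabs, and tabs are 8 characters wide; runs of spaces used as indentation are what this patch removes. A minimal illustration of the shape of most hunks below (not taken from the patch itself):

/* Before: statements indented with spaces */
int example_before(int x)
{
        if (x)                  /* indented with 8 spaces */
                x++;            /* indented with 16 spaces */
        return x;
}

/* After: the same code indented with tabs, as the kernel coding style requires */
int example_after(int x)
{
	if (x)
		x++;
	return x;
}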
@@ -230,20 +230,20 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 				      struct pt_regs *regs)
 {
 	unsigned long *sara = (unsigned long *)&regs->esp;
-        struct kretprobe_instance *ri;
-        if ((ri = get_free_rp_inst(rp)) != NULL) {
-                ri->rp = rp;
-                ri->task = current;
+	struct kretprobe_instance *ri;
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
 		ri->ret_addr = (kprobe_opcode_t *) *sara;
 		/* Replace the return addr with trampoline addr */
 		*sara = (unsigned long) &kretprobe_trampoline;
-                add_rp_inst(ri);
-        } else {
-                rp->nmissed++;
-        }
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
 }
 /*
@@ -359,7 +359,7 @@ no_kprobe:
 void __kprobes kretprobe_trampoline_holder(void)
 {
 	asm volatile ( ".global kretprobe_trampoline\n"
-                        "kretprobe_trampoline: \n"
+			"kretprobe_trampoline: \n"
 			" pushf\n"
 			/* skip cs, eip, orig_eax, es, ds */
 			" subl $20, %esp\n"
@@ -395,14 +395,14 @@ no_kprobe:
  */
 fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 {
-        struct kretprobe_instance *ri = NULL;
-        struct hlist_head *head;
-        struct hlist_node *node, *tmp;
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 	spin_lock_irqsave(&kretprobe_lock, flags);
-        head = kretprobe_inst_table_head(current);
+	head = kretprobe_inst_table_head(current);
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -413,14 +413,14 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 	 * We can handle this because:
 	 *     - instances are always inserted at the head of the list
 	 *     - when multiple return probes are registered for the same
-         *       function, the first instance's ret_addr will point to the
+	 *       function, the first instance's ret_addr will point to the
 	 *       real return address, and all the rest will point to
 	 *       kretprobe_trampoline
 	 */
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
-                if (ri->task != current)
+		if (ri->task != current)
 			/* another task is sharing our hash bucket */
-                        continue;
+			continue;
 		if (ri->rp && ri->rp->handler){
 			__get_cpu_var(current_kprobe) = &ri->rp->kp;
......
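The i386 hunks above touch both halves of the return-probe machinery: arch_prepare_kretprobe() stashes the real return address and redirects the caller to kretprobe_trampoline, and trampoline_handler() later recovers it. A minimal, hypothetical module showing how that machinery is normally driven from the kprobes API (the probed symbol "do_fork" and the use of .kp.symbol_name are assumptions for illustration, not part of this patch):

#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs when the probed function returns, i.e. after the trampoline
 * handler above has recovered the real return address. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "probed function returned to %p\n", ri->ret_addr);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	.maxactive	= 20,	/* concurrent instances; overflows count in nmissed */
	/* hypothetical target; assumes a kernel where kprobes can
	 * resolve the probe address from a symbol name */
	.kp.symbol_name	= "do_fork",
};

static int __init kretprobe_example_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit kretprobe_example_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
	printk(KERN_INFO "missed %d probe instances\n", my_kretprobe.nmissed);
}

module_init(kretprobe_example_init);
module_exit(kretprobe_example_exit);
MODULE_LICENSE("GPL");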
@@ -90,7 +90,7 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
 	p->ainsn.target_br_reg = 0;
 	/* Check for Break instruction
-         * Bits 37:40 Major opcode to be zero
+	 * Bits 37:40 Major opcode to be zero
 	 * Bits 27:32 X6 to be zero
 	 * Bits 32:35 X3 to be zero
 	 */
@@ -104,19 +104,19 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
 		switch (major_opcode) {
 		case INDIRECT_CALL_OPCODE:
 			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
-                        p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
-                        break;
+			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+			break;
 		case IP_RELATIVE_PREDICT_OPCODE:
 		case IP_RELATIVE_BRANCH_OPCODE:
 			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
-                        break;
+			break;
 		case IP_RELATIVE_CALL_OPCODE:
-                        p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
-                        p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
-                        p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
-                        break;
+			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+			break;
 		}
-        } else if (bundle_encoding[template][slot] == X) {
+	} else if (bundle_encoding[template][slot] == X) {
 		switch (major_opcode) {
 		case LONG_CALL_OPCODE:
 			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
@@ -258,18 +258,18 @@ static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
 	switch (slot) {
 	case 0:
-                *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
-                *kprobe_inst = bundle->quad0.slot0;
-                break;
+		*major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
+		*kprobe_inst = bundle->quad0.slot0;
+		break;
 	case 1:
-                *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
-                kprobe_inst_p0 = bundle->quad0.slot1_p0;
-                kprobe_inst_p1 = bundle->quad1.slot1_p1;
-                *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
+		*major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
+		kprobe_inst_p0 = bundle->quad0.slot1_p0;
+		kprobe_inst_p1 = bundle->quad1.slot1_p1;
+		*kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
 		break;
 	case 2:
-                *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
-                *kprobe_inst = bundle->quad1.slot2;
+		*major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
+		*kprobe_inst = bundle->quad1.slot2;
 		break;
 	}
 }
@@ -290,11 +290,11 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
 		return -EINVAL;
 	}
-        if (in_ivt_functions(addr)) {
-                printk(KERN_WARNING "Kprobes can't be inserted inside "
+	if (in_ivt_functions(addr)) {
+		printk(KERN_WARNING "Kprobes can't be inserted inside "
 				"IVT functions at 0x%lx\n", addr);
-                return -EINVAL;
-        }
+		return -EINVAL;
+	}
 	if (slot == 1 && bundle_encoding[template][1] != L) {
 		printk(KERN_WARNING "Inserting kprobes on slot #1 "
@@ -424,14 +424,14 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	bundle_t *bundle;
 	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
-        template = bundle->quad0.template;
+	template = bundle->quad0.template;
 	if(valid_kprobe_addr(template, slot, addr))
 		return -EINVAL;
 	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
-        if (slot == 1 && bundle_encoding[template][1] == L)
-                slot++;
+	if (slot == 1 && bundle_encoding[template][1] == L)
+		slot++;
 	/* Get kprobe_inst and major_opcode from the bundle */
 	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
@@ -489,21 +489,22 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
  */
 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
-        unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
-        unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
-        unsigned long template;
-        int slot = ((unsigned long)p->addr & 0xf);
+	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
+	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
+	unsigned long template;
+	int slot = ((unsigned long)p->addr & 0xf);
 	template = p->ainsn.insn->bundle.quad0.template;
-        if (slot == 1 && bundle_encoding[template][1] == L)
-                slot = 2;
+	if (slot == 1 && bundle_encoding[template][1] == L)
+		slot = 2;
 	if (p->ainsn.inst_flag) {
 		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
 			/* Fix relative IP address */
-			regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr;
+			regs->cr_iip = (regs->cr_iip - bundle_addr) +
+					resume_addr;
 		}
 		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
@@ -540,18 +541,18 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 	}
 	if (slot == 2) {
-                if (regs->cr_iip == bundle_addr + 0x10) {
-                        regs->cr_iip = resume_addr + 0x10;
-                }
-        } else {
-                if (regs->cr_iip == bundle_addr) {
-                        regs->cr_iip = resume_addr;
-                }
+		if (regs->cr_iip == bundle_addr + 0x10) {
+			regs->cr_iip = resume_addr + 0x10;
+		}
+	} else {
+		if (regs->cr_iip == bundle_addr) {
+			regs->cr_iip = resume_addr;
+		}
 	}
 turn_ss_off:
-        /* Turn off Single Step bit */
-        ia64_psr(regs)->ss = 0;
+	/* Turn off Single Step bit */
+	ia64_psr(regs)->ss = 0;
 }
 static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
@@ -587,7 +588,7 @@ static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
 	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
 	if (slot == 1 && bundle_encoding[template][1] == L)
-                slot++;
+		slot++;
 	/* Get Kprobe probe instruction at given slot*/
 	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
@@ -627,7 +628,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 	if (p) {
 		if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
 			(p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
-                        ia64_psr(regs)->ss = 0;
+			ia64_psr(regs)->ss = 0;
 			goto no_kprobe;
 		}
 		/* We have reentered the pre_kprobe_handler(), since
@@ -887,7 +888,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	 * fix the return address to our jprobe_inst_return() function
 	 * in the jprobes.S file
 	 */
-        regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
+	regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
 	return 1;
 }
......
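The ia64 hunks reformat get_kprobe_inst(), which extracts a 41-bit instruction from a 128-bit bundle: a bundle is a 5-bit template followed by three 41-bit slots stored in two 64-bit quads, so slots 0 and 2 each live within a single quad while slot 1 straddles both, which is where the (64-46) shift comes from. A standalone sketch of the same arithmetic, with hypothetical names and plain integer math instead of the kernel's bitfield struct:

#include <stdint.h>

/* Illustrative layout of an IA-64 bundle: a 5-bit template followed by
 * three 41-bit instruction slots, stored as two 64-bit quads. */
struct bundle {
	uint64_t quad0;
	uint64_t quad1;
};

/* Extract the 41-bit instruction in 'slot' (0..2); mirrors the shift
 * logic in get_kprobe_inst() above. */
static uint64_t slot_inst(const struct bundle *b, int slot)
{
	uint64_t mask41 = (1ULL << 41) - 1;

	switch (slot) {
	case 0:	/* bits 5..45, entirely in quad0 */
		return (b->quad0 >> 5) & mask41;
	case 1:	/* bits 46..86: low 18 bits in quad0, the rest in quad1 */
		return ((b->quad0 >> 46) | (b->quad1 << (64 - 46))) & mask41;
	case 2:	/* bits 87..127, entirely in quad1 */
		return (b->quad1 >> 23) & mask41;
	}
	return 0;
}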
@@ -259,14 +259,14 @@ void kretprobe_trampoline_holder(void)
  */
 int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
-        struct kretprobe_instance *ri = NULL;
-        struct hlist_head *head;
-        struct hlist_node *node, *tmp;
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 	spin_lock_irqsave(&kretprobe_lock, flags);
-        head = kretprobe_inst_table_head(current);
+	head = kretprobe_inst_table_head(current);
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -277,14 +277,14 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * We can handle this because:
 	 *     - instances are always inserted at the head of the list
 	 *     - when multiple return probes are registered for the same
-         *       function, the first instance's ret_addr will point to the
+	 *       function, the first instance's ret_addr will point to the
 	 *       real return address, and all the rest will point to
 	 *       kretprobe_trampoline
 	 */
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
-                if (ri->task != current)
+		if (ri->task != current)
 			/* another task is sharing our hash bucket */
-                        continue;
+			continue;
 		if (ri->rp && ri->rp->handler)
 			ri->rp->handler(ri, regs);
@@ -308,12 +308,12 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();
-        /*
-         * By returning a non-zero value, we are telling
-         * kprobe_handler() that we don't want the post_handler
-         * to run (and have re-enabled preemption)
-         */
-        return 1;
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
+	 */
+	return 1;
 }
 /*
......
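The trampoline handler above relies on the ordering invariant spelled out in its comment: instances are pushed at the head of the per-task list, and when several return probes hit the same function only the earliest instance holds the real return address while the later ones hold the trampoline address. A userspace sketch of that recovery rule, with hypothetical names and the per-task hash-bucket details left out:

#include <stdio.h>

#define TRAMPOLINE 0xdeadbeefUL	/* stand-in for &kretprobe_trampoline */

struct instance {
	unsigned long ret_addr;
	struct instance *next;
};

/* Walk from the most recent instance and stop at the first saved address
 * that is not the trampoline: that is the real caller. */
static unsigned long recover(struct instance *head)
{
	unsigned long orig = 0;
	struct instance *ri;

	for (ri = head; ri; ri = ri->next) {
		orig = ri->ret_addr;	/* candidate return address */
		if (orig != TRAMPOLINE)
			break;		/* real caller found */
	}
	return orig;
}

int main(void)
{
	/* Two return probes on the same function: the instance created
	 * first holds the real caller, the later one holds the
	 * trampoline address. */
	struct instance first = { 0x400123UL, NULL };
	struct instance second = { TRAMPOLINE, &first };

	printf("recovered return address: %#lx\n", recover(&second));
	return 0;
}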
@@ -270,20 +270,19 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 				      struct pt_regs *regs)
 {
 	unsigned long *sara = (unsigned long *)regs->rsp;
-        struct kretprobe_instance *ri;
+	struct kretprobe_instance *ri;
 
-        if ((ri = get_free_rp_inst(rp)) != NULL) {
-                ri->rp = rp;
-                ri->task = current;
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
 		ri->ret_addr = (kprobe_opcode_t *) *sara;
 		/* Replace the return addr with trampoline addr */
 		*sara = (unsigned long) &kretprobe_trampoline;
-                add_rp_inst(ri);
-        } else {
-                rp->nmissed++;
-        }
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
 }
 int __kprobes kprobe_handler(struct pt_regs *regs)
@@ -405,14 +404,14 @@ no_kprobe:
  */
 int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
-        struct kretprobe_instance *ri = NULL;
-        struct hlist_head *head;
-        struct hlist_node *node, *tmp;
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 	spin_lock_irqsave(&kretprobe_lock, flags);
-        head = kretprobe_inst_table_head(current);
+	head = kretprobe_inst_table_head(current);
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -423,14 +422,14 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * We can handle this because:
 	 *     - instances are always inserted at the head of the list
 	 *     - when multiple return probes are registered for the same
-         *       function, the first instance's ret_addr will point to the
+	 *       function, the first instance's ret_addr will point to the
 	 *       real return address, and all the rest will point to
 	 *       kretprobe_trampoline
 	 */
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
-                if (ri->task != current)
+		if (ri->task != current)
 			/* another task is sharing our hash bucket */
-                        continue;
+			continue;
 		if (ri->rp && ri->rp->handler)
 			ri->rp->handler(ri, regs);
@@ -454,12 +453,12 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();
-        /*
-         * By returning a non-zero value, we are telling
-         * kprobe_handler() that we don't want the post_handler
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we don't want the post_handler
 	 * to run (and have re-enabled preemption)
-         */
-        return 1;
+	 */
+	return 1;
 }
 /*
......
@@ -347,17 +347,17 @@ struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
  */
 void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
-        struct kretprobe_instance *ri;
-        struct hlist_head *head;
+	struct kretprobe_instance *ri;
+	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
 	unsigned long flags = 0;
 	spin_lock_irqsave(&kretprobe_lock, flags);
-        head = kretprobe_inst_table_head(tk);
-        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
-                if (ri->task == tk)
-                        recycle_rp_inst(ri);
-        }
+	head = kretprobe_inst_table_head(tk);
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task == tk)
+			recycle_rp_inst(ri);
+	}
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
 }
@@ -514,7 +514,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 				(ARCH_INACTIVE_KPROBE_COUNT + 1))
 		register_page_fault_notifier(&kprobe_page_fault_nb);
-        arch_arm_kprobe(p);
+	arch_arm_kprobe(p);
 out:
 	mutex_unlock(&kprobe_mutex);
......
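The final hunk is in the generic registration path: register_kprobe() funnels into __register_kprobe(), which does the bookkeeping shown above and then arms the probe via arch_arm_kprobe(). For completeness, a minimal, hypothetical module exercising that path with an ordinary (non-return) kprobe; the target symbol and the use of .symbol_name are assumptions for illustration:

#include <linux/module.h>
#include <linux/kprobes.h>

/* Called just before the probed instruction executes. Returning 0 lets
 * kprobes single-step the original instruction as usual. */
static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe my_kprobe = {
	.pre_handler	= pre_handler,
	/* hypothetical target; any probeable kernel function would do */
	.symbol_name	= "do_fork",
};

static int __init kprobe_example_init(void)
{
	/* ends up in __register_kprobe(), which calls arch_arm_kprobe() */
	return register_kprobe(&my_kprobe);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&my_kprobe);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");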