
From: Prasanna S Panchamukhi <prasanna@in.ibm.com>

- The kprobe structure has been modified so that each architecture can
  control how the original instruction is copied.  On x86_64, the normal
  pages we get from kmalloc or vmalloc are not executable, and
  single-stepping an instruction on such a page yields an oops.  So
  instead of storing the instruction copies inside their respective
  kprobe objects, we allocate a page, map it executable, store all the
  instruction copies there, and keep a pointer to each copy in its
  kprobe object (a hypothetical allocator along these lines is sketched
  after this list).

- jprobe_return_end is now a label defined inside jprobe_return()'s
  inline assembly, so the compiler cannot place it away from the int3
  it is meant to mark.

- arch_prepare_kprobe() now returns an int, since it may fail on other
  architectures (see the second sketch after this list).

- Added an arch_remove_kprobe() routine, since other architectures
  require it; on i386 it is a no-op.
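
A minimal sketch (not part of this patch) of how an architecture could
allocate executable instruction slots, assuming the three-argument
__vmalloc() and PAGE_KERNEL_EXEC; the names kprobe_insn_page,
get_insn_slot(), free_insn_slot() and INSNS_PER_PAGE below are all
hypothetical:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>
	#include <asm/kprobes.h>

	#define INSNS_PER_PAGE \
		(PAGE_SIZE / (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

	struct kprobe_insn_page {
		kprobe_opcode_t *insns;		/* executable page */
		char slot_used[INSNS_PER_PAGE];	/* slot bookkeeping */
	};

	static struct kprobe_insn_page insn_page;

	/* Hand out one MAX_INSN_SIZE slot from an executable page. */
	static kprobe_opcode_t *get_insn_slot(void)
	{
		int i;

		if (!insn_page.insns) {
			/* kmalloc/vmalloc memory is not executable,
			 * so map this page explicitly executable. */
			insn_page.insns = __vmalloc(PAGE_SIZE, GFP_KERNEL,
						    PAGE_KERNEL_EXEC);
			if (!insn_page.insns)
				return NULL;
		}
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (!insn_page.slot_used[i]) {
				insn_page.slot_used[i] = 1;
				return insn_page.insns + i * MAX_INSN_SIZE;
			}
		}
		return NULL;	/* page full; a real allocator would
				 * chain additional pages here */
	}

	static void free_insn_slot(kprobe_opcode_t *slot)
	{
		insn_page.slot_used[(slot - insn_page.insns)
				    / MAX_INSN_SIZE] = 0;
	}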
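
And a hypothetical x86_64-style arch_prepare_kprobe()/arch_remove_kprobe()
pair built on that allocator, assuming ainsn.insn is a pointer rather than
the inline array used on i386.  The i386 versions in this patch always
return 0 and leave arch_remove_kprobe() empty:

	#include <linux/errno.h>
	#include <linux/string.h>

	int arch_prepare_kprobe(struct kprobe *p)
	{
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;	/* propagated by register_kprobe() */
		memcpy(p->ainsn.insn, p->addr,
		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		return 0;
	}

	void arch_remove_kprobe(struct kprobe *p)
	{
		free_insn_slot(p->ainsn.insn);
	}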

Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/arch/i386/kernel/kprobes.c |   32 +++++++++++++++++++-------------
 25-akpm/include/asm-i386/kprobes.h |    7 +++++++
 25-akpm/include/linux/kprobes.h    |    5 +++--
 25-akpm/kernel/kprobes.c           |    6 +++++-
 4 files changed, 34 insertions(+), 16 deletions(-)

diff -puN arch/i386/kernel/kprobes.c~kprobes-minor-i386-changes-required-for-porting-kprobes-to-x86_64 arch/i386/kernel/kprobes.c
--- 25/arch/i386/kernel/kprobes.c~kprobes-minor-i386-changes-required-for-porting-kprobes-to-x86_64	Mon Nov  1 15:36:58 2004
+++ 25-akpm/arch/i386/kernel/kprobes.c	Mon Nov  1 15:36:58 2004
@@ -42,6 +42,7 @@ static struct pt_regs jprobe_saved_regs;
 static long *jprobe_saved_esp;
 /* copy of the kernel stack at the probe fire time */
 static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+void jprobe_return_end(void);
 
 /*
  * returns non-zero if opcode modifies the interrupt flag.
@@ -58,9 +59,14 @@ static inline int is_IF_modifier(kprobe_
 	return 0;
 }
 
-void arch_prepare_kprobe(struct kprobe *p)
+int arch_prepare_kprobe(struct kprobe *p)
+{
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	return 0;
+}
+
+void arch_remove_kprobe(struct kprobe *p)
 {
-	memcpy(p->insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 }
 
 static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
@@ -73,7 +79,7 @@ static inline void prepare_singlestep(st
 {
 	regs->eflags |= TF_MASK;
 	regs->eflags &= ~IF_MASK;
-	regs->eip = (unsigned long)&p->insn;
+	regs->eip = (unsigned long)&p->ainsn.insn;
 }
 
 /*
@@ -153,7 +159,7 @@ static inline int kprobe_handler(struct 
  * instruction.  To avoid the SMP problems that can occur when we
  * temporarily put back the original opcode to single-step, we
  * single-stepped a copy of the instruction.  The address of this
- * copy is p->insn.
+ * copy is p->ainsn.insn.
  *
  * This function prepares to return from the post-single-step
  * interrupt.  We have to fix up the stack as follows:
@@ -173,10 +179,10 @@ static void resume_execution(struct kpro
 {
 	unsigned long *tos = (unsigned long *)&regs->esp;
 	unsigned long next_eip = 0;
-	unsigned long copy_eip = (unsigned long)&p->insn;
+	unsigned long copy_eip = (unsigned long)&p->ainsn.insn;
 	unsigned long orig_eip = (unsigned long)p->addr;
 
-	switch (p->insn[0]) {
+	switch (p->ainsn.insn[0]) {
 	case 0x9c:		/* pushfl */
 		*tos &= ~(TF_MASK | IF_MASK);
 		*tos |= kprobe_old_eflags;
@@ -185,13 +191,13 @@ static void resume_execution(struct kpro
 		*tos = orig_eip + (*tos - copy_eip);
 		break;
 	case 0xff:
-		if ((p->insn[1] & 0x30) == 0x10) {
+		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
 			/* call absolute, indirect */
 			/* Fix return addr; eip is correct. */
 			next_eip = regs->eip;
 			*tos = orig_eip + (*tos - copy_eip);
-		} else if (((p->insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
-			   ((p->insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
+		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
+			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
 			/* eip is correct. */
 			next_eip = regs->eip;
 		}
@@ -315,12 +321,12 @@ void jprobe_return(void)
 {
 	preempt_enable_no_resched();
 	asm volatile ("       xchgl   %%ebx,%%esp     \n"
-		      "       int3			\n"::"b"
+		      "       int3			\n"
+		      "       .globl jprobe_return_end	\n"
+		      "       jprobe_return_end:	\n"
+		      "       nop			\n"::"b"
 		      (jprobe_saved_esp):"memory");
 }
-void jprobe_return_end(void)
-{
-};
 
 int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
diff -puN include/asm-i386/kprobes.h~kprobes-minor-i386-changes-required-for-porting-kprobes-to-x86_64 include/asm-i386/kprobes.h
--- 25/include/asm-i386/kprobes.h~kprobes-minor-i386-changes-required-for-porting-kprobes-to-x86_64	Mon Nov  1 15:36:58 2004
+++ 25-akpm/include/asm-i386/kprobes.h	Mon Nov  1 15:36:58 2004
@@ -38,6 +38,13 @@ typedef u8 kprobe_opcode_t;
 	? (MAX_STACK_SIZE) \
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
+/* Architecture-specific copy of the original instruction */
+struct arch_specific_insn {
+	/* copy of the original instruction */
+	kprobe_opcode_t insn[MAX_INSN_SIZE];
+};
+
+
 /* trap3/1 are intr gates for kprobes.  So, restore the status of IF,
  * if necessary, before executing the original int3/1 (trap) handler.
  */
diff -puN include/linux/kprobes.h~kprobes-minor-i386-changes-required-for-porting-kprobes-to-x86_64 include/linux/kprobes.h
--- 25/include/linux/kprobes.h~kprobes-minor-i386-changes-required-for-porting-kprobes-to-x86_64	Mon Nov  1 15:36:58 2004
+++ 25-akpm/include/linux/kprobes.h	Mon Nov  1 15:36:58 2004
@@ -64,7 +64,7 @@ struct kprobe {
 	kprobe_opcode_t opcode;
 
 	/* copy of the original instruction */
-	kprobe_opcode_t insn[MAX_INSN_SIZE];
+	struct arch_specific_insn ainsn;
 };
 
 /*
@@ -94,7 +94,8 @@ static inline int kprobe_running(void)
 	return kprobe_cpu == smp_processor_id();
 }
 
-extern void arch_prepare_kprobe(struct kprobe *p);
+extern int arch_prepare_kprobe(struct kprobe *p);
+extern void arch_remove_kprobe(struct kprobe *p);
 extern void show_registers(struct pt_regs *regs);
 
 /* Get the kprobe at this addr (if any).  Must have called lock_kprobes */
diff -puN kernel/kprobes.c~kprobes-minor-i386-changes-required-for-porting-kprobes-to-x86_64 kernel/kprobes.c
--- 25/kernel/kprobes.c~kprobes-minor-i386-changes-required-for-porting-kprobes-to-x86_64	Mon Nov  1 15:36:58 2004
+++ 25-akpm/kernel/kprobes.c	Mon Nov  1 15:36:58 2004
@@ -84,10 +84,13 @@ int register_kprobe(struct kprobe *p)
 		ret = -EEXIST;
 		goto out;
 	}
+
+	if ((ret = arch_prepare_kprobe(p)) != 0) {
+		goto out;
+	}
 	hlist_add_head(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	arch_prepare_kprobe(p);
 	p->opcode = *p->addr;
 	*p->addr = BREAKPOINT_INSTRUCTION;
 	flush_icache_range((unsigned long) p->addr,
@@ -101,6 +104,7 @@ void unregister_kprobe(struct kprobe *p)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&kprobe_lock, flags);
+	arch_remove_kprobe(p);
 	*p->addr = p->opcode;
 	hlist_del(&p->hlist);
 	flush_icache_range((unsigned long) p->addr,
_
