[kdb] Fixed interrupt path in 2.6.29-rc

Martin Hicks mort at sgi.com
Thu Feb 5 14:33:03 CST 2009


I think this patch fixes the backtrace analysis of the interrupt path
now that save_args is an out-of-line function.  I didn't find a clean,
general way to deal with a function that modifies the stack when the
caller needs to know the new stack state, so the patch special-cases
save_args by name.
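
Condensed, the new BBOU_CALL handling does roughly the following (an
illustrative excerpt only; these helpers all already exist in
kdba_bt.c, and the full hunk is in the patch below):

	/* If the call target is save_args, apply its stack side effects
	 * by hand instead of treating it as an ordinary call.
	 */
	if (bb_is_static_disp(src) && src->disp == bb_save_args) {
		/* record where save_args leaves each saved register,
		 * relative to the caller's stack pointer at the call */
		bb_memory_set_reg(BBRG_RSP, BBRG_RDI, 0x48);
		/* ... rsi, rdx, rcx, rax and r8-r11 likewise ... */
		bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0);
		/* the frame-pointer link really ends up on the interrupt
		 * stack; fudge it onto this stack so the unwind works */
		bb_memory_set_reg_value(BBRG_RSP, -0x8, BBRG_RBP, 0);
		/* rbp now mirrors the pre-call stack pointer, and rsp is
		 * treated as one word lower than before the call */
		bb_reg_set_reg(BBRG_RBP, BBRG_RSP);
		bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
	}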

If someone knows a better way to handle this, I'd be interested.

I still haven't worked my way through the changes to the exception path.

mh


From: Martin Hicks <mort at sgi.com>
Date: Wed, 4 Feb 2009 14:38:55 -0600
Subject: [PATCH] fix bb analysis for save_args and common_interrupt

save_args became an out-of-line function that is called by the
interrupt stubs.  This complicates the basic-block analysis quite a
bit, because save_args not only backs up the registers but also
switches to the interrupt stack before returning to its caller, which
then calls do_IRQ.
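
For reference, the offsets used below (0x48 down to 0x00) amount to the
layout sketched here.  This struct is purely illustrative and does not
exist anywhere in the kernel; it just restates where save_args is
modeled as leaving each register, relative to the stub's stack pointer
at the call site (ten words == 0x50 bytes, which lines up with the
-0x50 osp_offset in the new NS("save_args") entry):

	/* Illustration only, not real kernel code: the ten words the bb
	 * analysis records for a "call save_args", as offsets from the
	 * caller's %rsp.
	 */
	struct save_args_layout {
		unsigned long rbp;	/* %rsp + 0x00 */
		unsigned long r11;	/* %rsp + 0x08 */
		unsigned long r10;	/* %rsp + 0x10 */
		unsigned long r9;	/* %rsp + 0x18 */
		unsigned long r8;	/* %rsp + 0x20 */
		unsigned long rax;	/* %rsp + 0x28 */
		unsigned long rcx;	/* %rsp + 0x30 */
		unsigned long rdx;	/* %rsp + 0x38 */
		unsigned long rsi;	/* %rsp + 0x40 */
		unsigned long rdi;	/* %rsp + 0x48 */
	};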

Signed-off-by: Martin Hicks <mort at sgi.com>
---
 arch/x86/kdb/kdba_bt.c |   46 ++++++++++++++++++++++++++++++++++++----------
 1 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kdb/kdba_bt.c b/arch/x86/kdb/kdba_bt.c
index 62a8444..631357a 100644
--- a/arch/x86/kdb/kdba_bt.c
+++ b/arch/x86/kdb/kdba_bt.c
@@ -253,7 +253,7 @@ struct bb_name_state {
 		.skip_mem.bits[0] = iskip_mem, \
 		.skip_regs.bits[0] = iskip_regs, \
 		.osp_offset = iosp_offset, \
-       		.address = 0 \
+		.address = 0 \
 	}
 
 /* Shorter forms for the common cases */
@@ -576,6 +576,7 @@ static struct bb_name_state bb_special_cases[] = {
 	NS("general_protection", error_code, all_regs, 0, 0, 0),
 	NS("error_entry", error_code_rax, all_regs, 0, BB_SKIP(RAX), -0x10),
 	NS("common_interrupt", error_code, all_regs, 0, 0, -0x8),
+	NS("save_args", error_code, all_regs, 0, 0, -0x50),
 };
 
 static const char *bb_spurious[] = {
@@ -1352,7 +1353,8 @@ static struct bb_actual bb_actual[KDB_INT_REGISTERS];
 static bfd_vma bb_func_start, bb_func_end;
 static bfd_vma bb_common_interrupt, bb_error_entry, bb_ret_from_intr,
 	       bb_thread_return, bb_sync_regs, bb_save_v86_state,
-	       bb__sched_text_start, bb__sched_text_end;
+	       bb__sched_text_start, bb__sched_text_end,
+	       bb_save_args;
 
 /* Record jmp instructions, both conditional and unconditional.  These form the
  * arcs between the basic blocks.  This is also used to record the state when
@@ -3447,20 +3449,42 @@ bb_usage(void)
 		}
 		break;
 	case BBOU_CALL:
-		/* Invalidate the scratch registers.  Functions sync_regs and
-		 * save_v86_state are special, their return value is the new
-		 * stack pointer.
-		 */
 		bb_reg_state_print(bb_reg_state);
-		bb_invalidate_scratch_reg();
 		if (bb_is_static_disp(src)) {
+			/* Function sync_regs and save_v86_state are special.
+			 * Their return value is the new stack pointer
+			 */
 			if (src->disp == bb_sync_regs) {
 				bb_reg_set_reg(BBRG_RAX, BBRG_RSP);
 			} else if (src->disp == bb_save_v86_state) {
 				bb_reg_set_reg(BBRG_RAX, BBRG_RSP);
 				bb_adjust_osp(BBRG_RAX, +KDB_WORD_SIZE);
 			}
+			/* Function save_args is special also.  It saves
+			 * a partial pt_regs onto the stack and switches
+			 * to the interrupt stack.
+			 */
+			else if (src->disp == bb_save_args) {
+				bb_memory_set_reg(BBRG_RSP, BBRG_RDI, 0x48);
+				bb_memory_set_reg(BBRG_RSP, BBRG_RSI, 0x40);
+				bb_memory_set_reg(BBRG_RSP, BBRG_RDX, 0x38);
+				bb_memory_set_reg(BBRG_RSP, BBRG_RCX, 0x30);
+				bb_memory_set_reg(BBRG_RSP, BBRG_RAX, 0x28);
+				bb_memory_set_reg(BBRG_RSP, BBRG_R8,  0x20);
+				bb_memory_set_reg(BBRG_RSP, BBRG_R9,  0x18);
+				bb_memory_set_reg(BBRG_RSP, BBRG_R10, 0x10);
+				bb_memory_set_reg(BBRG_RSP, BBRG_R11, 0x08);
+				bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0);
+				/* This is actually on the interrupt stack,
+				 * but we fudge it so the unwind works.
+				 */
+				bb_memory_set_reg_value(BBRG_RSP, -0x8, BBRG_RBP, 0);
+				bb_reg_set_reg(BBRG_RBP, BBRG_RSP);
+				bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
+			}
 		}
+		/* Invalidate the scratch registers */
+		bb_invalidate_scratch_reg();
 		usage = BBOU_NOP;
 		break;
 	case BBOU_CBW:
@@ -3784,7 +3808,8 @@ bb_usage(void)
 		    strcmp(bb_func_name, "relocate_kernel") == 0 ||
 		    strcmp(bb_func_name, "identity_mapped") == 0 ||
 		    strcmp(bb_func_name, "xen_iret_crit_fixup") == 0 ||
-		    strcmp(bb_func_name, "math_abort") == 0)
+		    strcmp(bb_func_name, "math_abort") == 0 ||
+		    strcmp(bb_func_name, "save_args") == 0)
 			break;
 		bb_sanity_check(0);
 		break;
@@ -4452,7 +4477,7 @@ bb_actual_rollback(const struct kdb_activation_record *ar)
 			   __FUNCTION__, bb_actual_value(BBRG_RSP));
 	i = BBRG_RSP;
 	if (!bb_is_osp_defined(i)) {
-	       	for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
+		for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
 			if (bb_is_osp_defined(i) && bb_actual_valid(i))
 				break;
 		}
@@ -5176,7 +5201,7 @@ static void
 kdba_bt_new_stack(struct kdb_activation_record *ar, kdb_machreg_t *rsp,
 		   int *count, int *suppress)
 {
-	/* Nasty: common_interrupt builds a partial pt_regs, with r15 through
+	/* Nasty: save_args builds a partial pt_regs, with r15 through
 	 * rbx not being filled in.  It passes struct pt_regs* to do_IRQ (in
 	 * rdi) but the stack pointer is not adjusted to account for r15
 	 * through rbx.  This has two effects :-
@@ -5573,6 +5598,7 @@ static int __init kdba_bt_x86_init(void)
 	bb_save_v86_state = kallsyms_lookup_name("save_v86_state");
 	bb__sched_text_start = kallsyms_lookup_name("__sched_text_start");
 	bb__sched_text_end = kallsyms_lookup_name("__sched_text_end");
+	bb_save_args = kallsyms_lookup_name("save_args");
 	for (i = 0, r = bb_special_cases;
 	     i < ARRAY_SIZE(bb_special_cases);
 	     ++i, ++r) {
-- 
1.5.4.rc3



