diff --git a/qemu/target/arm/helper.c b/qemu/target/arm/helper.c
index 4bb57ecc..92ce0d6e 100644
--- a/qemu/target/arm/helper.c
+++ b/qemu/target/arm/helper.c
@@ -5724,7 +5724,7 @@ static uint32_t arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure)
     return addr;
 }
 
-static void v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                   bool ignore_faults)
 {
     /* For v8M, push the callee-saves register part of the stack frame.
@@ -5732,31 +5732,55 @@ static void v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
      * In the tailchaining case this may not be the current stack.
      */
     CPUARMState *env = &cpu->env;
-    CPUState *cs = CPU(cpu);
     uint32_t *frame_sp_p;
     uint32_t frameptr;
+    ARMMMUIdx mmu_idx;
+    bool stacked_ok;
 
     if (dotailchain) {
-        frame_sp_p = get_v7m_sp_ptr(env, true,
-                                    lr & R_V7M_EXCRET_MODE_MASK,
+        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
+        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
+            !mode;
+
+        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
+        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                     lr & R_V7M_EXCRET_SPSEL_MASK);
     } else {
+        mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
         frame_sp_p = &env->regs[13];
     }
 
     frameptr = *frame_sp_p - 0x28;
 
-    stl_phys(cs->as, frameptr, 0xfefa125b);
-    stl_phys(cs->as, frameptr + 0x8, env->regs[4]);
-    stl_phys(cs->as, frameptr + 0xc, env->regs[5]);
-    stl_phys(cs->as, frameptr + 0x10, env->regs[6]);
-    stl_phys(cs->as, frameptr + 0x14, env->regs[7]);
-    stl_phys(cs->as, frameptr + 0x18, env->regs[8]);
-    stl_phys(cs->as, frameptr + 0x1c, env->regs[9]);
-    stl_phys(cs->as, frameptr + 0x20, env->regs[10]);
-    stl_phys(cs->as, frameptr + 0x24, env->regs[11]);
+    /* Write as much of the stack frame as we can. A write failure may
+     * cause us to pend a derived exception.
+     */
+    stacked_ok =
+        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
+                        ignore_faults);
 
+    /* Update SP regardless of whether any of the stack accesses failed.
+     * When we implement v8M stack limit checking then this attempt to
+     * update SP might also fail and result in a derived exception.
+     */
     *frame_sp_p = frameptr;
+
+    return !stacked_ok;
 }
 
 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
@@ -5770,6 +5794,7 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
     uint32_t addr;
     bool targets_secure = false;
     int exc = 0;
+    bool push_failed = false;
 
     // Unicorn: commented out
     //armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
@@ -5798,8 +5823,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
          */
         if (lr & R_V7M_EXCRET_DCRS_MASK &&
             !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
-            v7m_push_callee_stack(cpu, lr, dotailchain,
-                                  ignore_stackfaults);
+            push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
+                                                ignore_stackfaults);
         }
         lr |= R_V7M_EXCRET_DCRS_MASK;
     }
@@ -5843,6 +5868,15 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
 
     addr = arm_v7m_load_vector(cpu, exc, targets_secure);
 
+    if (push_failed && !ignore_stackfaults) {
+        /* Derived exception on callee-saves register stacking:
+         * we might now want to take a different exception which
+         * targets a different security state, so try again from the top.
+         */
+        v7m_exception_taken(cpu, lr, true, true);
+        return;
+    }
+
     /* Now we've done everything that might cause a derived exception
      * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
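
Note: v7m_stack_write() is not defined in this excerpt; the hunk assumes a helper that performs the store through the MPU for the given mmu_idx and returns true on success. The chained && above means the first failing store short-circuits the remaining ones, while SP is still updated unconditionally and the caller observes the failure through the return value. A standalone sketch of that pattern, with a hypothetical write_word() and a fake fault threshold standing in for the real MPU check:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for v7m_stack_write(): any store at or above
 * fault_base "faults", modelling an MPU violation partway up the frame. */
static bool write_word(uint32_t addr, uint32_t value, uint32_t fault_base)
{
    if (addr >= fault_base) {
        printf("store to 0x%08" PRIx32 " faulted\n", addr);
        return false; /* the real helper would pend a derived exception */
    }
    printf("stored 0x%08" PRIx32 " at 0x%08" PRIx32 "\n", value, addr);
    return true;
}

int main(void)
{
    uint32_t sp = 0x20001000;
    uint32_t regs[8] = { 4, 5, 6, 7, 8, 9, 10, 11 }; /* stands in for r4-r11 */
    uint32_t frameptr = sp - 0x28;
    uint32_t fault_base = sp - 0x8; /* top two slots of the frame fault */
    bool stacked_ok;
    int i;

    /* Mirrors the stacked_ok = v7m_stack_write(...) && ... chain:
     * the first failure stops all further stores. */
    stacked_ok = write_word(frameptr, 0xfefa125b, fault_base);
    for (i = 0; i < 8 && stacked_ok; i++) {
        stacked_ok = write_word(frameptr + 0x8 + 4 * i, regs[i], fault_base);
    }

    /* SP is updated even when stacking failed, as the diff's comment notes;
     * the caller reacts to the failure (cf. push_failed above). */
    sp = frameptr;
    printf("SP now 0x%08" PRIx32 ", stacking %s\n",
           sp, stacked_ok ? "ok" : "failed");
    return 0;
}

Running the sketch stores the integrity signature and r4-r9, faults on the r10 slot, skips r11 entirely, and still moves SP down by 0x28, matching the behaviour the diff's comments describe.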