
arch: deprecate z_arch_esf_t with struct arch_esf, introduce an arch-agnostic exception.h for it #73593

Merged: 9 commits, Jun 4, 2024
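In short, each architecture now defines its exception stack frame as struct arch_esf directly, and a new arch-agnostic exception header keeps the old z_arch_esf_t spelling alive as a deprecated alias for out-of-tree users. A minimal sketch of that shim is below; the header path, guard, and per-arch include names are illustrative assumptions, not the exact contents of this PR.

```c
/* Illustrative sketch only: include paths and guard names are assumed, not copied from the PR. */
#ifndef ZEPHYR_INCLUDE_ARCH_EXCEPTION_H_
#define ZEPHYR_INCLUDE_ARCH_EXCEPTION_H_

#if defined(CONFIG_ARM)
#include <zephyr/arch/arm/exception.h>    /* defines struct arch_esf for ARM */
#elif defined(CONFIG_ARC)
#include <zephyr/arch/arc/v2/exception.h> /* defines struct arch_esf for ARC */
#endif
/* ...one include per supported architecture... */

/* Deprecated alias so existing code that still spells z_arch_esf_t keeps building
 * while new code migrates to struct arch_esf.
 */
typedef struct arch_esf z_arch_esf_t __attribute__((deprecated));

#endif /* ZEPHYR_INCLUDE_ARCH_EXCEPTION_H_ */
```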
4 changes: 2 additions & 2 deletions arch/arc/core/fatal.c
@@ -23,7 +23,7 @@
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_EXCEPTION_DEBUG
static void dump_arc_esf(const z_arch_esf_t *esf)
static void dump_arc_esf(const struct arch_esf *esf)
{
ARC_EXCEPTION_DUMP(" r0: 0x%" PRIxPTR " r1: 0x%" PRIxPTR " r2: 0x%" PRIxPTR
" r3: 0x%" PRIxPTR "", esf->r0, esf->r1, esf->r2, esf->r3);
@@ -42,7 +42,7 @@ static void dump_arc_esf(const z_arch_esf_t *esf)
}
#endif

void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
void z_arc_fatal_error(unsigned int reason, const struct arch_esf *esf)
{
#ifdef CONFIG_EXCEPTION_DEBUG
if (esf != NULL) {
2 changes: 1 addition & 1 deletion arch/arc/core/fault.c
@@ -346,7 +346,7 @@ static void dump_exception_info(uint32_t vector, uint32_t cause, uint32_t parame
* invokes the user provided routine k_sys_fatal_error_handler() which is
* responsible for implementing the error handling policy.
*/
void _Fault(z_arch_esf_t *esf, uint32_t old_sp)
void _Fault(struct arch_esf *esf, uint32_t old_sp)
{
uint32_t vector, cause, parameter;
uint32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);
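The comment in the hunk above refers to k_sys_fatal_error_handler(), the application-overridable hook that implements the error handling policy. A hedged sketch of an application override using the renamed type, assuming the usual weak-symbol override and that k_fatal_halt() is available as the terminal action:

```c
#include <zephyr/kernel.h>
#include <zephyr/fatal.h>
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(app_fatal, LOG_LEVEL_ERR);

/* Replaces the weak default policy: log the reason and halt instead of rebooting. */
void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
{
	ARG_UNUSED(esf);

	LOG_ERR("Fatal error %u, halting", reason);
	k_fatal_halt(reason);
}
```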
6 changes: 3 additions & 3 deletions arch/arc/include/kernel_arch_data.h
@@ -36,7 +36,7 @@ extern "C" {
#endif

#ifdef CONFIG_ARC_HAS_SECURE
struct _irq_stack_frame {
struct arch_esf {
#ifdef CONFIG_ARC_HAS_ZOL
uintptr_t lp_end;
uintptr_t lp_start;
@@ -72,7 +72,7 @@ struct _irq_stack_frame {
uintptr_t status32;
};
#else
struct _irq_stack_frame {
struct arch_esf {
uintptr_t r0;
uintptr_t r1;
uintptr_t r2;
@@ -108,7 +108,7 @@ struct _irq_stack_frame {
};
#endif

typedef struct _irq_stack_frame _isf_t;
typedef struct arch_esf _isf_t;



2 changes: 1 addition & 1 deletion arch/arc/include/kernel_arch_func.h
@@ -62,7 +62,7 @@ extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
void *p2, void *p3, uint32_t stack, uint32_t size,
struct k_thread *thread);

extern void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
extern void z_arc_fatal_error(unsigned int reason, const struct arch_esf *esf);

extern void arch_sched_ipi(void);

8 changes: 4 additions & 4 deletions arch/arm/core/cortex_a_r/fault.c
@@ -206,7 +206,7 @@ bool z_arm_fault_undef_instruction_fp(void)
*
* @return Returns true if the fault is fatal
*/
bool z_arm_fault_undef_instruction(z_arch_esf_t *esf)
bool z_arm_fault_undef_instruction(struct arch_esf *esf)
{
#if defined(CONFIG_FPU_SHARING)
/*
@@ -243,7 +243,7 @@ bool z_arm_fault_undef_instruction(z_arch_esf_t *esf)
*
* @return Returns true if the fault is fatal
*/
bool z_arm_fault_prefetch(z_arch_esf_t *esf)
bool z_arm_fault_prefetch(struct arch_esf *esf)
{
uint32_t reason = K_ERR_CPU_EXCEPTION;

@@ -299,7 +299,7 @@ static const struct z_exc_handle exceptions[] = {
*
* @return true if error is recoverable, otherwise return false.
*/
static bool memory_fault_recoverable(z_arch_esf_t *esf)
static bool memory_fault_recoverable(struct arch_esf *esf)
{
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
/* Mask out instruction mode */
@@ -321,7 +321,7 @@ static bool memory_fault_recoverable(z_arch_esf_t *esf)
*
* @return Returns true if the fault is fatal
*/
bool z_arm_fault_data(z_arch_esf_t *esf)
bool z_arm_fault_data(struct arch_esf *esf)
{
uint32_t reason = K_ERR_CPU_EXCEPTION;

2 changes: 1 addition & 1 deletion arch/arm/core/cortex_a_r/irq_manage.c
@@ -71,7 +71,7 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
}
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */

void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);

/**
*
2 changes: 1 addition & 1 deletion arch/arm/core/cortex_m/coredump.c
@@ -41,7 +41,7 @@ struct arm_arch_block {
*/
static struct arm_arch_block arch_blk;

void arch_coredump_info_dump(const z_arch_esf_t *esf)
void arch_coredump_info_dump(const struct arch_esf *esf)
{
struct coredump_arch_hdr_t hdr = {
.id = COREDUMP_ARCH_HDR_ID,
58 changes: 29 additions & 29 deletions arch/arm/core/cortex_m/fault.c
@@ -146,7 +146,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
*/

#if (CONFIG_FAULT_DUMP == 1)
static void fault_show(const z_arch_esf_t *esf, int fault)
static void fault_show(const struct arch_esf *esf, int fault)
{
PR_EXC("Fault! EXC #%d", fault);

@@ -165,7 +165,7 @@ static void fault_show(const z_arch_esf_t *esf, int fault)
*
* For Dump level 0, no information needs to be generated.
*/
static void fault_show(const z_arch_esf_t *esf, int fault)
static void fault_show(const struct arch_esf *esf, int fault)
{
(void)esf;
(void)fault;
@@ -185,7 +185,7 @@ static const struct z_exc_handle exceptions[] = {
*
* @return true if error is recoverable, otherwise return false.
*/
static bool memory_fault_recoverable(z_arch_esf_t *esf, bool synchronous)
static bool memory_fault_recoverable(struct arch_esf *esf, bool synchronous)
{
#ifdef CONFIG_USERSPACE
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
@@ -228,7 +228,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr,
*
* @return error code to identify the fatal error reason
*/
static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
static uint32_t mem_manage_fault(struct arch_esf *esf, int from_hard_fault,
bool *recoverable)
{
uint32_t reason = K_ERR_ARM_MEM_GENERIC;
@@ -387,7 +387,7 @@ static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
* @return error code to identify the fatal error reason.
*
*/
static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
static int bus_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable)
{
uint32_t reason = K_ERR_ARM_BUS_GENERIC;

@@ -549,7 +549,7 @@ static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
*
* @return error code to identify the fatal error reason
*/
static uint32_t usage_fault(const z_arch_esf_t *esf)
static uint32_t usage_fault(const struct arch_esf *esf)
{
uint32_t reason = K_ERR_ARM_USAGE_GENERIC;

@@ -612,7 +612,7 @@ static uint32_t usage_fault(const z_arch_esf_t *esf)
*
* @return error code to identify the fatal error reason
*/
static uint32_t secure_fault(const z_arch_esf_t *esf)
static uint32_t secure_fault(const struct arch_esf *esf)
{
uint32_t reason = K_ERR_ARM_SECURE_GENERIC;

@@ -661,7 +661,7 @@ static uint32_t secure_fault(const z_arch_esf_t *esf)
* See z_arm_fault_dump() for example.
*
*/
static void debug_monitor(z_arch_esf_t *esf, bool *recoverable)
static void debug_monitor(struct arch_esf *esf, bool *recoverable)
{
*recoverable = false;

@@ -687,7 +687,7 @@ static void debug_monitor(z_arch_esf_t *esf, bool *recoverable)
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

static inline bool z_arm_is_synchronous_svc(z_arch_esf_t *esf)
static inline bool z_arm_is_synchronous_svc(struct arch_esf *esf)
{
uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
/* SVC is a 16-bit instruction. On a synchronous SVC
@@ -762,7 +762,7 @@ static inline bool z_arm_is_pc_valid(uintptr_t pc)
*
* @return error code to identify the fatal error reason
*/
static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
static uint32_t hard_fault(struct arch_esf *esf, bool *recoverable)
{
uint32_t reason = K_ERR_CPU_EXCEPTION;

@@ -829,7 +829,7 @@ static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
* See z_arm_fault_dump() for example.
*
*/
static void reserved_exception(const z_arch_esf_t *esf, int fault)
static void reserved_exception(const struct arch_esf *esf, int fault)
{
ARG_UNUSED(esf);

@@ -839,7 +839,7 @@ static void reserved_exception(const z_arch_esf_t *esf, int fault)
}

/* Handler function for ARM fault conditions. */
static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
static uint32_t fault_handle(struct arch_esf *esf, int fault, bool *recoverable)
{
uint32_t reason = K_ERR_CPU_EXCEPTION;

@@ -893,7 +893,7 @@ static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
*
* @param secure_esf Pointer to the secure stack frame.
*/
static void secure_stack_dump(const z_arch_esf_t *secure_esf)
static void secure_stack_dump(const struct arch_esf *secure_esf)
{
/*
* In case a Non-Secure exception interrupted the Secure
@@ -918,7 +918,7 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
* Non-Secure exception entry.
*/
top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
secure_esf = (const z_arch_esf_t *)top_of_sec_stack;
secure_esf = (const struct arch_esf *)top_of_sec_stack;
sec_ret_addr = secure_esf->basic.pc;
} else {
/* Exception during Non-Secure function call.
@@ -947,11 +947,11 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
*
* @return ESF pointer on success, otherwise return NULL
*/
static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
static inline struct arch_esf *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
bool *nested_exc)
{
bool alternative_state_exc = false;
z_arch_esf_t *ptr_esf = NULL;
struct arch_esf *ptr_esf = NULL;

*nested_exc = false;

@@ -979,14 +979,14 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
alternative_state_exc = true;

/* Dump the Secure stack before handling the actual fault. */
z_arch_esf_t *secure_esf;
struct arch_esf *secure_esf;

if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
/* Secure stack pointed by PSP */
secure_esf = (z_arch_esf_t *)psp;
secure_esf = (struct arch_esf *)psp;
} else {
/* Secure stack pointed by MSP */
secure_esf = (z_arch_esf_t *)msp;
secure_esf = (struct arch_esf *)msp;
*nested_exc = true;
}

@@ -997,9 +997,9 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
* and supply it to the fault handing function.
*/
if (exc_return & EXC_RETURN_MODE_THREAD) {
ptr_esf = (z_arch_esf_t *)__TZ_get_PSP_NS();
ptr_esf = (struct arch_esf *)__TZ_get_PSP_NS();
} else {
ptr_esf = (z_arch_esf_t *)__TZ_get_MSP_NS();
ptr_esf = (struct arch_esf *)__TZ_get_MSP_NS();
}
}
#elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
@@ -1024,10 +1024,10 @@

if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
/* Non-Secure stack frame on PSP */
ptr_esf = (z_arch_esf_t *)psp;
ptr_esf = (struct arch_esf *)psp;
} else {
/* Non-Secure stack frame on MSP */
ptr_esf = (z_arch_esf_t *)msp;
ptr_esf = (struct arch_esf *)msp;
}
} else {
/* Exception entry occurred in Non-Secure stack. */
@@ -1046,11 +1046,11 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
if (!alternative_state_exc) {
if (exc_return & EXC_RETURN_MODE_THREAD) {
/* Returning to thread mode */
ptr_esf = (z_arch_esf_t *)psp;
ptr_esf = (struct arch_esf *)psp;

} else {
/* Returning to handler mode */
ptr_esf = (z_arch_esf_t *)msp;
ptr_esf = (struct arch_esf *)msp;
*nested_exc = true;
}
}
@@ -1095,12 +1095,12 @@ void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
uint32_t reason = K_ERR_CPU_EXCEPTION;
int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
bool recoverable, nested_exc;
z_arch_esf_t *esf;
struct arch_esf *esf;

/* Create a stack-ed copy of the ESF to be used during
* the fault handling process.
*/
z_arch_esf_t esf_copy;
struct arch_esf esf_copy;

/* Force unlock interrupts */
arch_irq_unlock(0);
@@ -1123,13 +1123,13 @@

/* Copy ESF */
#if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
memcpy(&esf_copy, esf, sizeof(z_arch_esf_t));
memcpy(&esf_copy, esf, sizeof(struct arch_esf));
ARG_UNUSED(callee_regs);
#else
/* the extra exception info is not present in the original esf
* so we only copy the fields before those.
*/
memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info));
memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
esf_copy.extra_info = (struct __extra_esf_info) {
.callee = callee_regs,
.exc_return = exc_return,
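One detail worth noting in the cortex_m/fault.c hunks above: with CONFIG_EXTRA_EXCEPTION_INFO enabled, only the fields preceding extra_info are copied out of the stacked frame, via offsetof(). A self-contained sketch of that idiom, with made-up struct and field names purely for illustration:

```c
#include <stddef.h>
#include <string.h>

/* Hypothetical frame layout; only the names are invented, the idiom matches the diff. */
struct frame {
	unsigned long r0;
	unsigned long r1;
	unsigned long pc;
	void *extra_info; /* filled in by the handler, never present in the stacked copy */
};

static void copy_frame_prefix(struct frame *dst, const struct frame *src)
{
	/* Copy everything up to, but not including, extra_info. */
	memcpy(dst, src, offsetof(struct frame, extra_info));
	dst->extra_info = NULL; /* caller populates this separately */
}
```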
2 changes: 1 addition & 1 deletion arch/arm/core/cortex_m/irq_manage.c
@@ -94,7 +94,7 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)

#endif /* !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) */

void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);

/**
*