Initial commit

Julien Chevalley
2023-12-11 14:43:05 +01:00
commit 902141e8b6
1103 changed files with 810796 additions and 0 deletions


@@ -0,0 +1,544 @@
/**************************************************************************//**
* @file cmsis_armcc.h
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.2
* @date 10. January 2018
******************************************************************************/
/*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CMSIS_ARMCC_H
#define __CMSIS_ARMCC_H
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 400677)
#error "Please use Arm Compiler Toolchain V4.0.677 or later!"
#endif
/* CMSIS compiler control architecture macros */
#if (defined (__TARGET_ARCH_7_A ) && (__TARGET_ARCH_7_A == 1))
#define __ARM_ARCH_7A__ 1
#endif
/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE __inline
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE __forceinline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static __inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE static __forceinline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __declspec(noreturn)
#endif
#ifndef CMSIS_DEPRECATED
#define CMSIS_DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed))
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT __packed struct
#endif
#ifndef __UNALIGNED_UINT16_WRITE
#define __UNALIGNED_UINT16_WRITE(addr, val) ((*((__packed uint16_t *)(addr))) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
#define __UNALIGNED_UINT16_READ(addr) (*((const __packed uint16_t *)(addr)))
#endif
#ifndef __UNALIGNED_UINT32_WRITE
#define __UNALIGNED_UINT32_WRITE(addr, val) ((*((__packed uint32_t *)(addr))) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
#define __UNALIGNED_UINT32_READ(addr) (*((const __packed uint32_t *)(addr)))
#endif
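/* Usage sketch (illustrative, not part of the original header): reading and
writing a 32-bit value at a potentially unaligned address. The buffer and
offset are hypothetical.

uint8_t frame[8];
uint32_t v = __UNALIGNED_UINT32_READ(&frame[1]);
__UNALIGNED_UINT32_WRITE(&frame[1], v + 1U);
*/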
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
/* ########################## Core Instruction Access ######################### */
/**
\brief No Operation
*/
#define __NOP __nop
/**
\brief Wait For Interrupt
*/
#define __WFI __wfi
/**
\brief Wait For Event
*/
#define __WFE __wfe
/**
\brief Send Event
*/
#define __SEV __sev
/**
\brief Instruction Synchronization Barrier
*/
#define __ISB() do {\
__schedule_barrier();\
__isb(0xF);\
__schedule_barrier();\
} while (0U)
/**
\brief Data Synchronization Barrier
*/
#define __DSB() do {\
__schedule_barrier();\
__dsb(0xF);\
__schedule_barrier();\
} while (0U)
/**
\brief Data Memory Barrier
*/
#define __DMB() do {\
__schedule_barrier();\
__dmb(0xF);\
__schedule_barrier();\
} while (0U)
/**
\brief Reverse byte order (32 bit)
\details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
\param [in] value Value to reverse
\return Reversed value
*/
#define __REV __rev
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
\param [in] value Value to reverse
\return Reversed value
*/
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)
{
rev16 r0, r0
bx lr
}
#endif
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
\param [in] value Value to reverse
\return Reversed value
*/
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value)
{
revsh r0, r0
bx lr
}
#endif
/**
\brief Rotate Right in unsigned value (32 bit)
\param [in] op1 Value to rotate
\param [in] op2 Number of Bits to rotate
\return Rotated value
*/
#define __ROR __ror
/**
\brief Breakpoint
\param [in] value is ignored by the processor.
If required, a debugger can use it to store additional information about the breakpoint.
*/
#define __BKPT(value) __breakpoint(value)
/**
\brief Reverse bit order of value
\param [in] value Value to reverse
\return Reversed value
*/
#define __RBIT __rbit
/**
\brief Count leading zeros
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
#define __CLZ __clz
/**
\brief LDR Exclusive (8 bit)
\details Executes an exclusive LDR instruction for an 8-bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr))
#else
#define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop")
#endif
/**
\brief LDR Exclusive (16 bit)
\details Executes an exclusive LDR instruction for 16-bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __LDREXH(ptr) ((uint16_t) __ldrex(ptr))
#else
#define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop")
#endif
/**
\brief LDR Exclusive (32 bit)
\details Executes an exclusive LDR instruction for 32-bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr))
#else
#define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop")
#endif
/**
\brief STR Exclusive (8 bit)
\details Executes an exclusive STR instruction for 8-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __STREXB(value, ptr) __strex(value, ptr)
#else
#define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
/**
\brief STR Exclusive (16 bit)
\details Executes an exclusive STR instruction for 16-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __STREXH(value, ptr) __strex(value, ptr)
#else
#define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
/**
\brief STR Exclusive (32 bit)
\details Executes an exclusive STR instruction for 32-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __STREXW(value, ptr) __strex(value, ptr)
#else
#define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
/**
\brief Remove the exclusive lock
\details Removes the exclusive lock which is created by LDREX.
*/
#define __CLREX __clrex
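/* Usage sketch (illustrative): an atomic increment built from the exclusive
access macros. 'counter' is a hypothetical shared variable; __STREXW returns
0 on success, so the loop retries until the store wins.

volatile uint32_t counter;
uint32_t old;
do {
old = __LDREXW(&counter);
} while (__STREXW(old + 1U, &counter) != 0U);
*/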
/**
\brief Signed Saturate
\details Saturates a signed value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (1..32)
\return Saturated value
*/
#define __SSAT __ssat
/**
\brief Unsigned Saturate
\details Saturates an unsigned value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (0..31)
\return Saturated value
*/
#define __USAT __usat
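/* Usage sketch (illustrative): with sat = 8, __SSAT clamps to the signed
8-bit range -128..127, so __SSAT(1000, 8) yields 127 and __SSAT(-1000, 8)
yields -128, e.g.

int32_t q = __SSAT(acc, 8); // 'acc' is a hypothetical 32-bit accumulator
*/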
/* ########################### Core Function Access ########################### */
/**
\brief Get FPSCR (Floating Point Status/Control)
\return Floating Point Status/Control register value
*/
__STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
register uint32_t __regfpscr __ASM("fpscr");
return(__regfpscr);
#else
return(0U);
#endif
}
/**
\brief Set FPSCR (Floating Point Status/Control)
\param [in] fpscr Floating Point Status/Control value to set
*/
__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
register uint32_t __regfpscr __ASM("fpscr");
__regfpscr = (fpscr);
#else
(void)fpscr;
#endif
}
/** \brief Get CPSR (Current Program Status Register)
\return CPSR Register value
*/
__STATIC_INLINE uint32_t __get_CPSR(void)
{
register uint32_t __regCPSR __ASM("cpsr");
return(__regCPSR);
}
/** \brief Set CPSR (Current Program Status Register)
\param [in] cpsr CPSR value to set
*/
__STATIC_INLINE void __set_CPSR(uint32_t cpsr)
{
register uint32_t __regCPSR __ASM("cpsr");
__regCPSR = cpsr;
}
/** \brief Get Mode
\return Processor Mode
*/
__STATIC_INLINE uint32_t __get_mode(void)
{
return (__get_CPSR() & 0x1FU);
}
/** \brief Set Mode
\param [in] mode Mode value to set
*/
__STATIC_INLINE __ASM void __set_mode(uint32_t mode)
{
MOV r1, lr
MSR CPSR_C, r0
BX r1
}
/** \brief Get Stack Pointer
\return Stack Pointer
*/
__STATIC_INLINE __ASM uint32_t __get_SP(void)
{
MOV r0, sp
BX lr
}
/** \brief Set Stack Pointer
\param [in] stack Stack Pointer value to set
*/
__STATIC_INLINE __ASM void __set_SP(uint32_t stack)
{
MOV sp, r0
BX lr
}
/** \brief Get USR/SYS Stack Pointer
\return USR/SYS Stack Pointer value
*/
__STATIC_INLINE __ASM uint32_t __get_SP_usr(void)
{
ARM
PRESERVE8
MRS R1, CPSR
CPS #0x1F ;no effect in USR mode
MOV R0, SP
MSR CPSR_c, R1 ;no effect in USR mode
ISB
BX LR
}
/** \brief Set USR/SYS Stack Pointer
\param [in] topOfProcStack USR/SYS Stack Pointer value to set
*/
__STATIC_INLINE __ASM void __set_SP_usr(uint32_t topOfProcStack)
{
ARM
PRESERVE8
MRS R1, CPSR
CPS #0x1F ;no effect in USR mode
MOV SP, R0
MSR CPSR_c, R1 ;no effect in USR mode
ISB
BX LR
}
/** \brief Get FPEXC (Floating Point Exception Control Register)
\return Floating Point Exception Control Register value
*/
__STATIC_INLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
register uint32_t __regfpexc __ASM("fpexc");
return(__regfpexc);
#else
return(0);
#endif
}
/** \brief Set FPEXC (Floating Point Exception Control Register)
\param [in] fpexc Floating Point Exception Control value to set
*/
__STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
register uint32_t __regfpexc __ASM("fpexc");
__regfpexc = (fpexc);
#endif
}
/*
* Include common core functions to access Coprocessor 15 registers
*/
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) do { register volatile uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); (Rt) = tmp; } while(0)
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) do { register volatile uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); tmp = (Rt); } while(0)
#define __get_CP64(cp, op1, Rt, CRm) \
do { \
uint32_t ltmp, htmp; \
__ASM volatile("MRRC p" # cp ", " # op1 ", ltmp, htmp, c" # CRm); \
(Rt) = ((((uint64_t)htmp) << 32U) | ((uint64_t)ltmp)); \
} while(0)
#define __set_CP64(cp, op1, Rt, CRm) \
do { \
const uint64_t tmp = (Rt); \
const uint32_t ltmp = (uint32_t)(tmp); \
const uint32_t htmp = (uint32_t)(tmp >> 32U); \
__ASM volatile("MCRR p" # cp ", " # op1 ", ltmp, htmp, c" # CRm); \
} while(0)
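/* Usage sketch (illustrative): reading the Main ID Register (MIDR), which is
CP15 op1=0, CRn=c0, CRm=c0, op2=0, through the accessor macro.

uint32_t midr;
__get_CP(15, 0, midr, 0, 0, 0);
*/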
#include "cmsis_cp15.h"
/** \brief Enable Floating Point Unit
Critical section, called from undef handler, so systick is disabled
*/
__STATIC_INLINE __ASM void __FPU_Enable(void)
{
ARM
//Permit access to VFP/NEON registers by modifying CPACR
MRC p15,0,R1,c1,c0,2
ORR R1,R1,#0x00F00000
MCR p15,0,R1,c1,c0,2
//Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
ISB
//Enable VFP/NEON
VMRS R1,FPEXC
ORR R1,R1,#0x40000000
VMSR FPEXC,R1
//Initialise VFP/NEON registers to 0
MOV R2,#0
//Initialise D16 registers to 0
VMOV D0, R2,R2
VMOV D1, R2,R2
VMOV D2, R2,R2
VMOV D3, R2,R2
VMOV D4, R2,R2
VMOV D5, R2,R2
VMOV D6, R2,R2
VMOV D7, R2,R2
VMOV D8, R2,R2
VMOV D9, R2,R2
VMOV D10,R2,R2
VMOV D11,R2,R2
VMOV D12,R2,R2
VMOV D13,R2,R2
VMOV D14,R2,R2
VMOV D15,R2,R2
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
//Initialise D32 registers to 0
VMOV D16,R2,R2
VMOV D17,R2,R2
VMOV D18,R2,R2
VMOV D19,R2,R2
VMOV D20,R2,R2
VMOV D21,R2,R2
VMOV D22,R2,R2
VMOV D23,R2,R2
VMOV D24,R2,R2
VMOV D25,R2,R2
VMOV D26,R2,R2
VMOV D27,R2,R2
VMOV D28,R2,R2
VMOV D29,R2,R2
VMOV D30,R2,R2
VMOV D31,R2,R2
ENDIF
//Initialise FPSCR to a known state
VMRS R2,FPSCR
LDR R3,=0x00086060 //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
AND R2,R2,R3
VMSR FPSCR,R2
BX LR
}
#endif /* __CMSIS_ARMCC_H */


@@ -0,0 +1,503 @@
/**************************************************************************//**
* @file cmsis_armclang.h
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.2
* @date 10. January 2018
******************************************************************************/
/*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CMSIS_ARMCLANG_H
#define __CMSIS_ARMCLANG_H
#pragma clang system_header /* treat file as system include file */
#ifndef __ARM_COMPAT_H
#include <arm_compat.h> /* Compatibility header for Arm Compiler 5 intrinsics */
#endif
/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE __inline
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE __attribute__((always_inline))
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static __inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((__noreturn__))
#endif
#ifndef CMSIS_DEPRECATED
#define CMSIS_DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed, aligned(1)))
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
#endif
#ifndef __UNALIGNED_UINT16_WRITE
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */
__PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
#pragma clang diagnostic pop
#define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */
__PACKED_STRUCT T_UINT16_READ { uint16_t v; };
#pragma clang diagnostic pop
#define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */
__PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
#pragma clang diagnostic pop
#define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpacked"
__PACKED_STRUCT T_UINT32_READ { uint32_t v; };
#pragma clang diagnostic pop
#define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
/* ########################## Core Instruction Access ######################### */
/**
\brief No Operation
*/
#define __NOP __builtin_arm_nop
/**
\brief Wait For Interrupt
*/
#define __WFI __builtin_arm_wfi
/**
\brief Wait For Event
*/
#define __WFE __builtin_arm_wfe
/**
\brief Send Event
*/
#define __SEV __builtin_arm_sev
/**
\brief Instruction Synchronization Barrier
*/
#define __ISB() do {\
__schedule_barrier();\
__builtin_arm_isb(0xF);\
__schedule_barrier();\
} while (0U)
/**
\brief Data Synchronization Barrier
*/
#define __DSB() do {\
__schedule_barrier();\
__builtin_arm_dsb(0xF);\
__schedule_barrier();\
} while (0U)
/**
\brief Data Memory Barrier
*/
#define __DMB() do {\
__schedule_barrier();\
__builtin_arm_dmb(0xF);\
__schedule_barrier();\
} while (0U)
/**
\brief Reverse byte order (32 bit)
\details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
\param [in] value Value to reverse
\return Reversed value
*/
#define __REV(value) __builtin_bswap32(value)
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
\param [in] value Value to reverse
\return Reversed value
*/
#define __REV16(value) __ROR(__REV(value), 16)
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
\param [in] value Value to reverse
\return Reversed value
*/
#define __REVSH(value) (int16_t)__builtin_bswap16(value)
/**
\brief Rotate Right in unsigned value (32 bit)
\details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
\param [in] op1 Value to rotate
\param [in] op2 Number of Bits to rotate
\return Rotated value
*/
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
op2 %= 32U;
if (op2 == 0U)
{
return op1;
}
return (op1 >> op2) | (op1 << (32U - op2));
}
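/* Usage sketch (illustrative): __ROR(0x12345678U, 8U) yields 0x78123456U, and
rotating by a multiple of 32 returns the value unchanged. __REV16 above is
built on this helper: __REV reverses all four bytes and the rotate by 16
restores the halfword order. */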
/**
\brief Breakpoint
\param [in] value is ignored by the processor.
If required, a debugger can use it to store additional information about the breakpoint.
*/
#define __BKPT(value) __ASM volatile ("bkpt "#value)
/**
\brief Reverse bit order of value
\param [in] value Value to reverse
\return Reversed value
*/
#define __RBIT __builtin_arm_rbit
/**
\brief Count leading zeros
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
#define __CLZ (uint8_t)__builtin_clz
/**
\brief LDR Exclusive (8 bit)
\details Executes an exclusive LDR instruction for an 8-bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
#define __LDREXB (uint8_t)__builtin_arm_ldrex
/**
\brief LDR Exclusive (16 bit)
\details Executes an exclusive LDR instruction for 16-bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
#define __LDREXH (uint16_t)__builtin_arm_ldrex
/**
\brief LDR Exclusive (32 bit)
\details Executes an exclusive LDR instruction for 32-bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
#define __LDREXW (uint32_t)__builtin_arm_ldrex
/**
\brief STR Exclusive (8 bit)
\details Executes an exclusive STR instruction for 8-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#define __STREXB (uint32_t)__builtin_arm_strex
/**
\brief STR Exclusive (16 bit)
\details Executes an exclusive STR instruction for 16-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#define __STREXH (uint32_t)__builtin_arm_strex
/**
\brief STR Exclusive (32 bit)
\details Executes an exclusive STR instruction for 32-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#define __STREXW (uint32_t)__builtin_arm_strex
/**
\brief Remove the exclusive lock
\details Removes the exclusive lock which is created by LDREX.
*/
#define __CLREX __builtin_arm_clrex
/**
\brief Signed Saturate
\details Saturates a signed value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (1..32)
\return Saturated value
*/
#define __SSAT __builtin_arm_ssat
/**
\brief Unsigned Saturate
\details Saturates an unsigned value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (0..31)
\return Saturated value
*/
#define __USAT __builtin_arm_usat
/* ########################### Core Function Access ########################### */
/**
\brief Get FPSCR
\details Returns the current value of the Floating Point Status/Control register.
\return Floating Point Status/Control register value
*/
#define __get_FPSCR __builtin_arm_get_fpscr
/**
\brief Set FPSCR
\details Assigns the given value to the Floating Point Status/Control register.
\param [in] fpscr Floating Point Status/Control value to set
*/
#define __set_FPSCR __builtin_arm_set_fpscr
/** \brief Get CPSR Register
\return CPSR Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CPSR(void)
{
uint32_t result;
__ASM volatile("MRS %0, cpsr" : "=r" (result) );
return(result);
}
/** \brief Set CPSR Register
\param [in] cpsr CPSR value to set
*/
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
}
/** \brief Get Mode
\return Processor Mode
*/
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
return (__get_CPSR() & 0x1FU);
}
/** \brief Set Mode
\param [in] mode Mode value to set
*/
__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
{
__ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
}
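/* Usage sketch (illustrative): the low five CPSR bits encode the mode, e.g.
0x10 (USR), 0x13 (SVC), 0x1F (SYS).

if (__get_mode() != 0x10U) {
__set_mode(0x1FU); // a privileged caller switches to SYS mode
}
*/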
/** \brief Get Stack Pointer
\return Stack Pointer value
*/
__STATIC_FORCEINLINE uint32_t __get_SP(void)
{
uint32_t result;
__ASM volatile("MOV %0, sp" : "=r" (result) : : "memory");
return result;
}
/** \brief Set Stack Pointer
\param [in] stack Stack Pointer value to set
*/
__STATIC_FORCEINLINE void __set_SP(uint32_t stack)
{
__ASM volatile("MOV sp, %0" : : "r" (stack) : "memory");
}
/** \brief Get USR/SYS Stack Pointer
\return USR/SYS Stack Pointer value
*/
__STATIC_FORCEINLINE uint32_t __get_SP_usr(void)
{
uint32_t cpsr;
uint32_t result;
__ASM volatile(
"MRS %0, cpsr \n"
"CPS #0x1F \n" // no effect in USR mode
"MOV %1, sp \n"
"MSR cpsr_c, %0 \n" // no effect in USR mode
"ISB" : "=&r"(cpsr), "=r"(result) : : "memory"
);
return result;
}
/** \brief Set USR/SYS Stack Pointer
\param [in] topOfProcStack USR/SYS Stack Pointer value to set
*/
__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
{
uint32_t cpsr;
__ASM volatile(
"MRS %0, cpsr \n"
"CPS #0x1F \n" // no effect in USR mode
"MOV sp, %1 \n"
"MSR cpsr_c, %0 \n" // no effect in USR mode
"ISB" : "=&r"(cpsr) : "r" (topOfProcStack) : "memory"
);
}
/** \brief Get FPEXC
\return Floating Point Exception Control register value
*/
__STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
uint32_t result;
__ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory");
return(result);
#else
return(0);
#endif
}
/** \brief Set FPEXC
\param [in] fpexc Floating Point Exception Control value to set
*/
__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
__ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
#endif
}
/*
* Include common core functions to access Coprocessor 15 registers
*/
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
#include "cmsis_cp15.h"
/** \brief Enable Floating Point Unit
Critical section, called from undef handler, so systick is disabled
*/
__STATIC_INLINE void __FPU_Enable(void)
{
__ASM volatile(
//Permit access to VFP/NEON registers by modifying CPACR
" MRC p15,0,R1,c1,c0,2 \n"
" ORR R1,R1,#0x00F00000 \n"
" MCR p15,0,R1,c1,c0,2 \n"
//Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
" ISB \n"
//Enable VFP/NEON
" VMRS R1,FPEXC \n"
" ORR R1,R1,#0x40000000 \n"
" VMSR FPEXC,R1 \n"
//Initialise VFP/NEON registers to 0
" MOV R2,#0 \n"
//Initialise D16 registers to 0
" VMOV D0, R2,R2 \n"
" VMOV D1, R2,R2 \n"
" VMOV D2, R2,R2 \n"
" VMOV D3, R2,R2 \n"
" VMOV D4, R2,R2 \n"
" VMOV D5, R2,R2 \n"
" VMOV D6, R2,R2 \n"
" VMOV D7, R2,R2 \n"
" VMOV D8, R2,R2 \n"
" VMOV D9, R2,R2 \n"
" VMOV D10,R2,R2 \n"
" VMOV D11,R2,R2 \n"
" VMOV D12,R2,R2 \n"
" VMOV D13,R2,R2 \n"
" VMOV D14,R2,R2 \n"
" VMOV D15,R2,R2 \n"
#if __ARM_NEON == 1
//Initialise D32 registers to 0
" VMOV D16,R2,R2 \n"
" VMOV D17,R2,R2 \n"
" VMOV D18,R2,R2 \n"
" VMOV D19,R2,R2 \n"
" VMOV D20,R2,R2 \n"
" VMOV D21,R2,R2 \n"
" VMOV D22,R2,R2 \n"
" VMOV D23,R2,R2 \n"
" VMOV D24,R2,R2 \n"
" VMOV D25,R2,R2 \n"
" VMOV D26,R2,R2 \n"
" VMOV D27,R2,R2 \n"
" VMOV D28,R2,R2 \n"
" VMOV D29,R2,R2 \n"
" VMOV D30,R2,R2 \n"
" VMOV D31,R2,R2 \n"
#endif
//Initialise FPSCR to a known state
" VMRS R2,FPSCR \n"
" LDR R3,=0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
" AND R2,R2,R3 \n"
" VMSR FPSCR,R2 "
);
}
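/* Usage note (illustrative): __FPU_Enable() is typically called once during
startup, or from the Undef handler as noted above, before the first VFP/NEON
instruction executes. */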
#endif /* __CMSIS_ARMCLANG_H */


@@ -0,0 +1,201 @@
/**************************************************************************//**
* @file cmsis_compiler.h
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.2
* @date 10. January 2018
******************************************************************************/
/*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CMSIS_COMPILER_H
#define __CMSIS_COMPILER_H
#include <stdint.h>
/*
* Arm Compiler 4/5
*/
#if defined ( __CC_ARM )
#include "cmsis_armcc.h"
/*
* Arm Compiler 6 (armclang)
*/
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
#include "cmsis_armclang.h"
/*
* GNU Compiler
*/
#elif defined ( __GNUC__ )
#include "cmsis_gcc.h"
/*
* IAR Compiler
*/
#elif defined ( __ICCARM__ )
#include "cmsis_iccarm.h"
/*
* TI Arm Compiler
*/
#elif defined ( __TI_ARM__ )
#include <cmsis_ccs.h>
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((noreturn))
#endif
#ifndef CMSIS_DEPRECATED
#define CMSIS_DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __UNALIGNED_UINT32
struct __attribute__((packed)) T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed))
#endif
/*
* TASKING Compiler
*/
#elif defined ( __TASKING__ )
/*
* The CMSIS functions have been implemented as intrinsics in the compiler.
* Please use "carm -?i" to get an up to date list of all intrinsics,
* Including the CMSIS ones.
*/
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((noreturn))
#endif
#ifndef CMSIS_DEPRECATED
#define CMSIS_DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __UNALIGNED_UINT32
struct __packed__ T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __align(x)
#endif
#ifndef __PACKED
#define __PACKED __packed__
#endif
/*
* COSMIC Compiler
*/
#elif defined ( __CSMC__ )
#include <cmsis_csm.h>
#ifndef __ASM
#define __ASM _asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
// NO RETURN is detected automatically, hence no warning here
#define __NO_RETURN
#endif
#ifndef __USED
#warning No compiler specific solution for __USED. __USED is ignored.
#define __USED
#endif
#ifndef CMSIS_DEPRECATED
#warning No compiler specific solution for CMSIS_DEPRECATED. CMSIS_DEPRECATED is ignored.
#define CMSIS_DEPRECATED
#endif
#ifndef __WEAK
#define __WEAK __weak
#endif
#ifndef __UNALIGNED_UINT32
@packed struct T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __ALIGNED
#warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored.
#define __ALIGNED(x)
#endif
#ifndef __PACKED
#define __PACKED @packed
#endif
#else
#error Unknown compiler.
#endif
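/* Usage sketch (illustrative): application code includes only this header and
uses the portable macros; the dispatch above selects the matching
compiler-specific implementation. Names below are hypothetical.

#include "cmsis_compiler.h"

__PACKED_STRUCT frame_t { uint8_t tag; uint32_t value; };

__WEAK void Timer_Handler(void)
{
// default handler; the application may override it
}
*/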
#endif /* __CMSIS_COMPILER_H */


@@ -0,0 +1,514 @@
/**************************************************************************//**
* @file cmsis_cp15.h
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.1
* @date 07. Sep 2017
******************************************************************************/
/*
* Copyright (c) 2009-2017 ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CMSIS_CP15_H
#define __CMSIS_CP15_H
/** \brief Get ACTLR
\return Auxiliary Control register value
*/
__STATIC_FORCEINLINE uint32_t __get_ACTLR(void)
{
uint32_t result;
__get_CP(15, 0, result, 1, 0, 1);
return(result);
}
/** \brief Set ACTLR
\param [in] actlr Auxiliary Control value to set
*/
__STATIC_FORCEINLINE void __set_ACTLR(uint32_t actlr)
{
__set_CP(15, 0, actlr, 1, 0, 1);
}
/** \brief Get CPACR
\return Coprocessor Access Control register value
*/
__STATIC_FORCEINLINE uint32_t __get_CPACR(void)
{
uint32_t result;
__get_CP(15, 0, result, 1, 0, 2);
return result;
}
/** \brief Set CPACR
\param [in] cpacr Coprocessor Access Control value to set
*/
__STATIC_FORCEINLINE void __set_CPACR(uint32_t cpacr)
{
__set_CP(15, 0, cpacr, 1, 0, 2);
}
/** \brief Get DFSR
\return Data Fault Status Register value
*/
__STATIC_FORCEINLINE uint32_t __get_DFSR(void)
{
uint32_t result;
__get_CP(15, 0, result, 5, 0, 0);
return result;
}
/** \brief Set DFSR
\param [in] dfsr Data Fault Status value to set
*/
__STATIC_FORCEINLINE void __set_DFSR(uint32_t dfsr)
{
__set_CP(15, 0, dfsr, 5, 0, 0);
}
/** \brief Get IFSR
\return Instruction Fault Status Register value
*/
__STATIC_FORCEINLINE uint32_t __get_IFSR(void)
{
uint32_t result;
__get_CP(15, 0, result, 5, 0, 1);
return result;
}
/** \brief Set IFSR
\param [in] ifsr Instruction Fault Status value to set
*/
__STATIC_FORCEINLINE void __set_IFSR(uint32_t ifsr)
{
__set_CP(15, 0, ifsr, 5, 0, 1);
}
/** \brief Get ISR
\return Interrupt Status Register value
*/
__STATIC_FORCEINLINE uint32_t __get_ISR(void)
{
uint32_t result;
__get_CP(15, 0, result, 12, 1, 0);
return result;
}
/** \brief Get CBAR
\return Configuration Base Address register value
*/
__STATIC_FORCEINLINE uint32_t __get_CBAR(void)
{
uint32_t result;
__get_CP(15, 4, result, 15, 0, 0);
return result;
}
/** \brief Get TTBR0
This function returns the value of the Translation Table Base Register 0.
\return Translation Table Base Register 0 value
*/
__STATIC_FORCEINLINE uint32_t __get_TTBR0(void)
{
uint32_t result;
__get_CP(15, 0, result, 2, 0, 0);
return result;
}
/** \brief Set TTBR0
This function assigns the given value to the Translation Table Base Register 0.
\param [in] ttbr0 Translation Table Base Register 0 value to set
*/
__STATIC_FORCEINLINE void __set_TTBR0(uint32_t ttbr0)
{
__set_CP(15, 0, ttbr0, 2, 0, 0);
}
/** \brief Get DACR
This function returns the value of the Domain Access Control Register.
\return Domain Access Control Register value
*/
__STATIC_FORCEINLINE uint32_t __get_DACR(void)
{
uint32_t result;
__get_CP(15, 0, result, 3, 0, 0);
return result;
}
/** \brief Set DACR
This function assigns the given value to the Domain Access Control Register.
\param [in] dacr Domain Access Control Register value to set
*/
__STATIC_FORCEINLINE void __set_DACR(uint32_t dacr)
{
__set_CP(15, 0, dacr, 3, 0, 0);
}
/** \brief Set SCTLR
This function assigns the given value to the System Control Register.
\param [in] sctlr System Control Register value to set
*/
__STATIC_FORCEINLINE void __set_SCTLR(uint32_t sctlr)
{
__set_CP(15, 0, sctlr, 1, 0, 0);
}
/** \brief Get SCTLR
\return System Control Register value
*/
__STATIC_FORCEINLINE uint32_t __get_SCTLR(void)
{
uint32_t result;
__get_CP(15, 0, result, 1, 0, 0);
return result;
}
/** \brief Set ACTRL
\param [in] actrl Auxiliary Control Register value to set
*/
__STATIC_FORCEINLINE void __set_ACTRL(uint32_t actrl)
{
__set_CP(15, 0, actrl, 1, 0, 1);
}
/** \brief Get ACTRL
\return Auxiliary Control Register value
*/
__STATIC_FORCEINLINE uint32_t __get_ACTRL(void)
{
uint32_t result;
__get_CP(15, 0, result, 1, 0, 1);
return result;
}
/** \brief Get MPIDR
This function returns the value of the Multiprocessor Affinity Register.
\return Multiprocessor Affinity Register value
*/
__STATIC_FORCEINLINE uint32_t __get_MPIDR(void)
{
uint32_t result;
__get_CP(15, 0, result, 0, 0, 5);
return result;
}
/** \brief Get VBAR
This function returns the value of the Vector Base Address Register.
\return Vector Base Address Register
*/
__STATIC_FORCEINLINE uint32_t __get_VBAR(void)
{
uint32_t result;
__get_CP(15, 0, result, 12, 0, 0);
return result;
}
/** \brief Set VBAR
This function assigns the given value to the Vector Base Address Register.
\param [in] vbar Vector Base Address Register value to set
*/
__STATIC_FORCEINLINE void __set_VBAR(uint32_t vbar)
{
__set_CP(15, 0, vbar, 12, 0, 0);
}
/** \brief Get MVBAR
This function returns the value of the Monitor Vector Base Address Register.
\return Monitor Vector Base Address Register
*/
__STATIC_FORCEINLINE uint32_t __get_MVBAR(void)
{
uint32_t result;
__get_CP(15, 0, result, 12, 0, 1);
return result;
}
/** \brief Set MVBAR
This function assigns the given value to the Monitor Vector Base Address Register.
\param [in] mvbar Monitor Vector Base Address Register value to set
*/
__STATIC_FORCEINLINE void __set_MVBAR(uint32_t mvbar)
{
__set_CP(15, 0, mvbar, 12, 0, 1);
}
#if (defined(__CORTEX_A) && (__CORTEX_A == 7U) && \
defined(__TIM_PRESENT) && (__TIM_PRESENT == 1U)) || \
defined(DOXYGEN)
/** \brief Set CNTFRQ
This function assigns the given value to the PL1 Physical Timer Counter Frequency Register (CNTFRQ).
\param [in] value CNTFRQ Register value to set
*/
__STATIC_FORCEINLINE void __set_CNTFRQ(uint32_t value)
{
__set_CP(15, 0, value, 14, 0, 0);
}
/** \brief Get CNTFRQ
This function returns the value of the PL1 Physical Timer Counter Frequency Register (CNTFRQ).
\return CNTFRQ Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CNTFRQ(void)
{
uint32_t result;
__get_CP(15, 0, result, 14, 0 , 0);
return result;
}
/** \brief Set CNTP_TVAL
This function assigns the given value to the PL1 Physical Timer Value Register (CNTP_TVAL).
\param [in] value CNTP_TVAL Register value to set
*/
__STATIC_FORCEINLINE void __set_CNTP_TVAL(uint32_t value)
{
__set_CP(15, 0, value, 14, 2, 0);
}
/** \brief Get CNTP_TVAL
This function returns the value of the PL1 Physical Timer Value Register (CNTP_TVAL).
\return CNTP_TVAL Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CNTP_TVAL(void)
{
uint32_t result;
__get_CP(15, 0, result, 14, 2, 0);
return result;
}
/** \brief Get CNTPCT
This function returns the value of the 64-bit PL1 Physical Count Register (CNTPCT).
\return CNTPCT Register value
*/
__STATIC_FORCEINLINE uint64_t __get_CNTPCT(void)
{
uint64_t result;
__get_CP64(15, 0, result, 14);
return result;
}
/** \brief Set CNTP_CVAL
This function assigns the given value to the 64-bit PL1 Physical Timer CompareValue Register (CNTP_CVAL).
\param [in] value CNTP_CVAL Register value to set
*/
__STATIC_FORCEINLINE void __set_CNTP_CVAL(uint64_t value)
{
__set_CP64(15, 2, value, 14);
}
/** \brief Get CNTP_CVAL
This function returns the value of the 64-bit PL1 Physical Timer CompareValue Register (CNTP_CVAL).
\return CNTP_CVAL Register value
*/
__STATIC_FORCEINLINE uint64_t __get_CNTP_CVAL(void)
{
uint64_t result;
__get_CP64(15, 2, result, 14);
return result;
}
/** \brief Set CNTP_CTL
This function assigns the given value to the PL1 Physical Timer Control Register (CNTP_CTL).
\param [in] value CNTP_CTL Register value to set
*/
__STATIC_FORCEINLINE void __set_CNTP_CTL(uint32_t value)
{
__set_CP(15, 0, value, 14, 2, 1);
}
/** \brief Get CNTP_CTL register
\return CNTP_CTL Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CNTP_CTL(void)
{
uint32_t result;
__get_CP(15, 0, result, 14, 2, 1);
return result;
}
#endif
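/* Usage sketch (illustrative, requires the generic timer block above): arming
the PL1 physical timer. CNTP_TVAL counts down and the timer condition is met
at zero; CNTP_CTL bit 0 enables the timer. Assumes CNTFRQ was programmed by
the secure firmware.

__set_CNTP_TVAL(__get_CNTFRQ() / 100U); // ~10 ms tick
__set_CNTP_CTL(1U);
*/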
/** \brief Set TLBIALL
TLB Invalidate All
*/
__STATIC_FORCEINLINE void __set_TLBIALL(uint32_t value)
{
__set_CP(15, 0, value, 8, 7, 0);
}
/** \brief Set BPIALL.
Branch Predictor Invalidate All
*/
__STATIC_FORCEINLINE void __set_BPIALL(uint32_t value)
{
__set_CP(15, 0, value, 7, 5, 6);
}
/** \brief Set ICIALLU
Instruction Cache Invalidate All
*/
__STATIC_FORCEINLINE void __set_ICIALLU(uint32_t value)
{
__set_CP(15, 0, value, 7, 5, 0);
}
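/* Usage sketch (illustrative): typical maintenance after switching the
translation tables - invalidate the TLBs and the branch predictor, then
synchronize. The written value is ignored for these operations; 'new_ttbr0'
is hypothetical.

__set_TTBR0(new_ttbr0);
__set_TLBIALL(0U);
__set_BPIALL(0U);
__DSB();
__ISB();
*/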
/** \brief Set DCCMVAC
Data cache clean
*/
__STATIC_FORCEINLINE void __set_DCCMVAC(uint32_t value)
{
__set_CP(15, 0, value, 7, 10, 1);
}
/** \brief Set DCIMVAC
Data cache invalidate
*/
__STATIC_FORCEINLINE void __set_DCIMVAC(uint32_t value)
{
__set_CP(15, 0, value, 7, 6, 1);
}
/** \brief Set DCCIMVAC
Data cache clean and invalidate
*/
__STATIC_FORCEINLINE void __set_DCCIMVAC(uint32_t value)
{
__set_CP(15, 0, value, 7, 14, 1);
}
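/* Usage sketch (illustrative): cleaning and invalidating a single cache line
by virtual address before handing a buffer to a DMA peripheral; 'buf' is a
hypothetical cache-line-aligned address.

__set_DCCIMVAC((uint32_t)buf);
__DSB(); // ensure the maintenance completes before the DMA starts
*/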
/** \brief Set CSSELR
*/
__STATIC_FORCEINLINE void __set_CSSELR(uint32_t value)
{
// __ASM volatile("MCR p15, 2, %0, c0, c0, 0" : : "r"(value) : "memory");
__set_CP(15, 2, value, 0, 0, 0);
}
/** \brief Get CSSELR
\return CSSELR Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CSSELR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 2, %0, c0, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 2, result, 0, 0, 0);
return result;
}
/** \brief Set CCSIDR
\deprecated CCSIDR itself is read-only. Use __set_CSSELR to select cache level instead.
*/
CMSIS_DEPRECATED
__STATIC_FORCEINLINE void __set_CCSIDR(uint32_t value)
{
__set_CSSELR(value);
}
/** \brief Get CCSIDR
\return CCSIDR Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CCSIDR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 1, %0, c0, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 1, result, 0, 0, 0);
return result;
}
/** \brief Get CLIDR
\return CLIDR Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CLIDR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 1, %0, c0, c0, 1" : "=r"(result) : : "memory");
__get_CP(15, 1, result, 0, 0, 1);
return result;
}
/** \brief Set DCISW
*/
__STATIC_FORCEINLINE void __set_DCISW(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c6, 2" : : "r"(value) : "memory")
__set_CP(15, 0, value, 7, 6, 2);
}
/** \brief Set DCCSW
*/
__STATIC_FORCEINLINE void __set_DCCSW(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c10, 2" : : "r"(value) : "memory")
__set_CP(15, 0, value, 7, 10, 2);
}
/** \brief Set DCCISW
*/
__STATIC_FORCEINLINE void __set_DCCISW(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c14, 2" : : "r"(value) : "memory")
__set_CP(15, 0, value, 7, 14, 2);
}
#endif


@@ -0,0 +1,679 @@
/**************************************************************************//**
* @file cmsis_gcc.h
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.2
* @date 09. April 2018
******************************************************************************/
/*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H
/* ignore some GCC warnings */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"
/* Fallback for __has_builtin */
#ifndef __has_builtin
#define __has_builtin(x) (0)
#endif
/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE __attribute__((always_inline))
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((__noreturn__))
#endif
#ifndef CMSIS_DEPRECATED
#define CMSIS_DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed, aligned(1)))
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
#endif
#ifndef __UNALIGNED_UINT16_WRITE
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */
__PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */
__PACKED_STRUCT T_UINT16_READ { uint16_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */
__PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
__PACKED_STRUCT T_UINT32_READ { uint32_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
/* ########################## Core Instruction Access ######################### */
/**
\brief No Operation
*/
#define __NOP() __ASM volatile ("nop")
/**
\brief Wait For Interrupt
*/
#define __WFI() __ASM volatile ("wfi")
/**
\brief Wait For Event
*/
#define __WFE() __ASM volatile ("wfe")
/**
\brief Send Event
*/
#define __SEV() __ASM volatile ("sev")
/**
\brief Instruction Synchronization Barrier
\details Instruction Synchronization Barrier flushes the pipeline in the processor,
so that all instructions following the ISB are fetched from cache or memory,
after the instruction has been completed.
*/
__STATIC_FORCEINLINE void __ISB(void)
{
__ASM volatile ("isb 0xF":::"memory");
}
/**
\brief Data Synchronization Barrier
\details Acts as a special kind of Data Memory Barrier.
It completes when all explicit memory accesses before this instruction complete.
*/
__STATIC_FORCEINLINE void __DSB(void)
{
__ASM volatile ("dsb 0xF":::"memory");
}
/**
\brief Data Memory Barrier
\details Ensures the apparent order of the explicit memory operations before
and after the instruction, without ensuring their completion.
*/
__STATIC_FORCEINLINE void __DMB(void)
{
__ASM volatile ("dmb 0xF":::"memory");
}
/**
\brief Reverse byte order (32 bit)
\details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
\param [in] value Value to reverse
\return Reversed value
*/
__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
return __builtin_bswap32(value);
#else
uint32_t result;
__ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return result;
#endif
}
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
\param [in] value Value to reverse
\return Reversed value
*/
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".rev16_text"))) __STATIC_INLINE uint32_t __REV16(uint32_t value)
{
uint32_t result;
__ASM volatile("rev16 %0, %1" : "=r" (result) : "r" (value));
return result;
}
#endif
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
\param [in] value Value to reverse
\return Reversed value
*/
__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
return (int16_t)__builtin_bswap16(value);
#else
int16_t result;
__ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return result;
#endif
}
/**
\brief Rotate Right in unsigned value (32 bit)
\details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
\param [in] op1 Value to rotate
\param [in] op2 Number of Bits to rotate
\return Rotated value
*/
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
op2 %= 32U;
if (op2 == 0U) {
return op1;
}
return (op1 >> op2) | (op1 << (32U - op2));
}
/**
\brief Breakpoint
\param [in] value is ignored by the processor.
If required, a debugger can use it to store additional information about the breakpoint.
*/
#define __BKPT(value) __ASM volatile ("bkpt "#value)
/**
\brief Reverse bit order of value
\details Reverses the bit order of the given value.
\param [in] value Value to reverse
\return Reversed value
*/
__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
{
uint32_t result;
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
__ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
int32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */
result = value; /* r will be reversed bits of v; first get LSB of v */
for (value >>= 1U; value; value >>= 1U)
{
result <<= 1U;
result |= value & 1U;
s--;
}
result <<= s; /* shift when v's highest bits are zero */
#endif
return result;
}
/**
\brief Count leading zeros
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
#define __CLZ (uint8_t)__builtin_clz
/**
\brief LDR Exclusive (8 bit)
\details Executes an exclusive LDR instruction for an 8-bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
__ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
/* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which the assembler does not
accept, so the following less efficient pattern has to be used.
*/
__ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
return ((uint8_t) result); /* Add explicit type cast here */
}
/**
\brief LDR Exclusive (16 bit)
\details Executes an exclusive LDR instruction for 16-bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
__ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
/* Prior to GCC 4.8, "Q" is expanded to [rx, #0], which the assembler does not
accept, so the following less efficient pattern has to be used.
*/
__ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
return ((uint16_t) result); /* Add explicit type cast here */
}
/**
\brief LDR Exclusive (32 bit)
\details Executes an exclusive LDR instruction for 32-bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
uint32_t result;
__ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
return(result);
}
/**
\brief STR Exclusive (8 bit)
\details Executes an exclusive STR instruction for 8-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
uint32_t result;
__ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
return(result);
}
/**
\brief STR Exclusive (16 bit)
\details Executes an exclusive STR instruction for 16-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
uint32_t result;
__ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
return(result);
}
/**
\brief STR Exclusive (32 bit)
\details Executes an exclusive STR instruction for 32-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
uint32_t result;
__ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
return(result);
}
/**
\brief Remove the exclusive lock
\details Removes the exclusive lock which is created by LDREX.
*/
__STATIC_FORCEINLINE void __CLREX(void)
{
__ASM volatile ("clrex" ::: "memory");
}
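/* Usage sketch (illustrative): a minimal spinlock acquire built on the
exclusive accesses; 'lock' is a hypothetical shared flag (0 = free).

volatile uint32_t lock;

do {
while (__LDREXW(&lock) != 0U) { } // spin until observed free
} while (__STREXW(1U, &lock) != 0U); // claim it; retry if we lost the race
__DMB(); // order the critical section after the acquire
*/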
/**
\brief Signed Saturate
\details Saturates a signed value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (1..32)
\return Saturated value
*/
#define __SSAT(ARG1,ARG2) \
__extension__ \
({ \
int32_t __RES, __ARG1 = (ARG1); \
__ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
__RES; \
})
/**
\brief Unsigned Saturate
\details Saturates an unsigned value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (0..31)
\return Saturated value
*/
#define __USAT(ARG1,ARG2) \
__extension__ \
({ \
uint32_t __RES, __ARG1 = (ARG1); \
__ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
__RES; \
})
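/* Usage sketch (illustrative): with sat = 8, __USAT clamps to the unsigned
8-bit range 0..255, so __USAT(300, 8) yields 255 and __USAT(-5, 8) yields 0.
The saturation width must be a compile-time constant because it is encoded
in the instruction (the "I" constraint above). */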
/* ########################### Core Function Access ########################### */
/**
\brief Enable IRQ Interrupts
\details Enables IRQ interrupts by clearing the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__STATIC_FORCEINLINE void __enable_irq(void)
{
__ASM volatile ("cpsie i" : : : "memory");
}
/**
\brief Disable IRQ Interrupts
\details Disables IRQ interrupts by setting the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__STATIC_FORCEINLINE void __disable_irq(void)
{
__ASM volatile ("cpsid i" : : : "memory");
}
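/* Usage sketch (illustrative): a simple critical section. These calls do not
nest; if interrupts may already be disabled, read and restore the CPSR
instead.

__disable_irq();
// ... update shared state ...
__enable_irq();
*/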
/**
\brief Get FPSCR
\details Returns the current value of the Floating Point Status/Control register.
\return Floating Point Status/Control register value
*/
__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
#if __has_builtin(__builtin_arm_get_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
/* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
return __builtin_arm_get_fpscr();
#else
uint32_t result;
__ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
return(result);
#endif
#else
return(0U);
#endif
}
/**
\brief Set FPSCR
\details Assigns the given value to the Floating Point Status/Control register.
\param [in] fpscr Floating Point Status/Control value to set
*/
__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
#if __has_builtin(__builtin_arm_set_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
/* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
__builtin_arm_set_fpscr(fpscr);
#else
__ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
#endif
#else
(void)fpscr;
#endif
}
/** \brief Get CPSR Register
\return CPSR Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CPSR(void)
{
uint32_t result;
__ASM volatile("MRS %0, cpsr" : "=r" (result) );
return(result);
}
/** \brief Set CPSR Register
\param [in] cpsr CPSR value to set
*/
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
}
/** \brief Get Mode
\return Processor Mode
*/
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
return (__get_CPSR() & 0x1FU);
}
/** \brief Set Mode
\param [in] mode Mode value to set
*/
__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
{
__ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
}
/** \brief Get Stack Pointer
\return Stack Pointer value
*/
__STATIC_FORCEINLINE uint32_t __get_SP(void)
{
uint32_t result;
__ASM volatile("MOV %0, sp" : "=r" (result) : : "memory");
return result;
}
/** \brief Set Stack Pointer
\param [in] stack Stack Pointer value to set
*/
__STATIC_FORCEINLINE void __set_SP(uint32_t stack)
{
__ASM volatile("MOV sp, %0" : : "r" (stack) : "memory");
}
/** \brief Get USR/SYS Stack Pointer
\return USR/SYS Stack Pointer value
*/
__STATIC_FORCEINLINE uint32_t __get_SP_usr(void)
{
uint32_t cpsr = __get_CPSR();
uint32_t result;
__ASM volatile(
"CPS #0x1F \n"
"MOV %0, sp " : "=r"(result) : : "memory"
);
__set_CPSR(cpsr);
__ISB();
return result;
}
/** \brief Set USR/SYS Stack Pointer
\param [in] topOfProcStack USR/SYS Stack Pointer value to set
*/
__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
{
uint32_t cpsr = __get_CPSR();
__ASM volatile(
"CPS #0x1F \n"
"MOV sp, %0 " : : "r" (topOfProcStack) : "memory"
);
__set_CPSR(cpsr);
__ISB();
}
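/* Illustrative usage (an addition, not part of the original header): both
   functions hop to System mode with "CPS #0x1F", which banks the same SP as
   User mode, access SP, then restore the saved CPSR. A typical use is setting
   up the USR/SYS stack from a privileged boot mode; user_stack below is a
   hypothetical, suitably aligned array. */
#if 0 /* usage sketch */
  static uint64_t user_stack[256];
  __set_SP_usr((uint32_t)&user_stack[256]);  /* full-descending: pass the top */
#endif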
/** \brief Get FPEXC
\return Floating Point Exception Control register value
*/
__STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
uint32_t result;
__ASM volatile("VMRS %0, fpexc" : "=r" (result) );
return(result);
#else
return(0);
#endif
}
/** \brief Set FPEXC
\param [in] fpexc Floating Point Exception Control value to set
*/
__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
__ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
#endif
}
/*
* Include common core functions to access Coprocessor 15 registers
*/
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
#include "cmsis_cp15.h"
/** \brief Enable Floating Point Unit
Critical section; may be called from the Undef handler, so SysTick is disabled
*/
__STATIC_INLINE void __FPU_Enable(void)
{
__ASM volatile(
//Permit access to VFP/NEON registers by modifying CPACR
" MRC p15,0,R1,c1,c0,2 \n"
" ORR R1,R1,#0x00F00000 \n"
" MCR p15,0,R1,c1,c0,2 \n"
//Ensure that subsequent instructions execute with VFP/NEON access permitted
" ISB \n"
//Enable VFP/NEON
" VMRS R1,FPEXC \n"
" ORR R1,R1,#0x40000000 \n"
" VMSR FPEXC,R1 \n"
//Initialise VFP/NEON registers to 0
" MOV R2,#0 \n"
//Initialise registers D0-D15 to 0
" VMOV D0, R2,R2 \n"
" VMOV D1, R2,R2 \n"
" VMOV D2, R2,R2 \n"
" VMOV D3, R2,R2 \n"
" VMOV D4, R2,R2 \n"
" VMOV D5, R2,R2 \n"
" VMOV D6, R2,R2 \n"
" VMOV D7, R2,R2 \n"
" VMOV D8, R2,R2 \n"
" VMOV D9, R2,R2 \n"
" VMOV D10,R2,R2 \n"
" VMOV D11,R2,R2 \n"
" VMOV D12,R2,R2 \n"
" VMOV D13,R2,R2 \n"
" VMOV D14,R2,R2 \n"
" VMOV D15,R2,R2 \n"
#if (defined(__ARM_NEON) && (__ARM_NEON == 1))
//Initialise registers D16-D31 to 0
" VMOV D16,R2,R2 \n"
" VMOV D17,R2,R2 \n"
" VMOV D18,R2,R2 \n"
" VMOV D19,R2,R2 \n"
" VMOV D20,R2,R2 \n"
" VMOV D21,R2,R2 \n"
" VMOV D22,R2,R2 \n"
" VMOV D23,R2,R2 \n"
" VMOV D24,R2,R2 \n"
" VMOV D25,R2,R2 \n"
" VMOV D26,R2,R2 \n"
" VMOV D27,R2,R2 \n"
" VMOV D28,R2,R2 \n"
" VMOV D29,R2,R2 \n"
" VMOV D30,R2,R2 \n"
" VMOV D31,R2,R2 \n"
#endif
//Initialise FPSCR to a known state
" VMRS R2,FPSCR \n"
" LDR R3,=0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
" AND R2,R2,R3 \n"
" VMSR FPSCR,R2 "
);
}
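/* Usage note (an addition, not part of the original header): the sequence
   above first grants full CP10/CP11 access in the CPACR (bits 20-23), issues
   an ISB so the new permissions take effect, then sets FPEXC.EN (bit 30).
   Call it once during startup, before any code that may emit VFP/NEON
   instructions: */
#if 0 /* usage sketch */
  __FPU_Enable();
  volatile float f = 1.0f * 2.0f;      /* floating-point code is now safe  */
#endif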
#pragma GCC diagnostic pop
#endif /* __CMSIS_GCC_H */

View File

@ -0,0 +1,559 @@
/**************************************************************************//**
* @file cmsis_iccarm.h
* @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file
* @version V5.0.6
* @date 02. March 2018
******************************************************************************/
//------------------------------------------------------------------------------
//
// Copyright (c) 2017-2018 IAR Systems
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------
#ifndef __CMSIS_ICCARM_H__
#define __CMSIS_ICCARM_H__
#ifndef __ICCARM__
#error This file should only be compiled by ICCARM
#endif
#pragma system_include
#define __IAR_FT _Pragma("inline=forced") __intrinsic
#if (__VER__ >= 8000000)
#define __ICCARM_V8 1
#else
#define __ICCARM_V8 0
#endif
#pragma language=extended
#ifndef __ALIGNED
#if __ICCARM_V8
#define __ALIGNED(x) __attribute__((aligned(x)))
#elif (__VER__ >= 7080000)
/* Needs IAR language extensions */
#define __ALIGNED(x) __attribute__((aligned(x)))
#else
#warning No compiler-specific solution for __ALIGNED. __ALIGNED is ignored.
#define __ALIGNED(x)
#endif
#endif
/* Define compiler macros for CPU architecture, used in CMSIS 5.
*/
#if __ARM_ARCH_7A__
/* Macro already defined */
#else
#if defined(__ARM7A__)
#define __ARM_ARCH_7A__ 1
#endif
#endif
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __NO_RETURN
#if __ICCARM_V8
#define __NO_RETURN __attribute__((__noreturn__))
#else
#define __NO_RETURN _Pragma("object_attribute=__noreturn")
#endif
#endif
#ifndef __PACKED
/* Needs IAR language extensions */
#if __ICCARM_V8
#define __PACKED __attribute__((packed, aligned(1)))
#else
#define __PACKED __packed
#endif
#endif
#ifndef __PACKED_STRUCT
/* Needs IAR language extensions */
#if __ICCARM_V8
#define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
#else
#define __PACKED_STRUCT __packed struct
#endif
#endif
#ifndef __PACKED_UNION
/* Needs IAR language extensions */
#if __ICCARM_V8
#define __PACKED_UNION union __attribute__((packed, aligned(1)))
#else
#define __PACKED_UNION __packed union
#endif
#endif
#ifndef __RESTRICT
#define __RESTRICT __restrict
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE _Pragma("inline=forced")
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE
#endif
#ifndef CMSIS_DEPRECATED
#define CMSIS_DEPRECATED __attribute__((deprecated))
#endif
#ifndef __UNALIGNED_UINT16_READ
#pragma language=save
#pragma language=extended
__IAR_FT uint16_t __iar_uint16_read(void const *ptr)
{
return *(__packed uint16_t*)(ptr);
}
#pragma language=restore
#define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR)
#endif
#ifndef __UNALIGNED_UINT16_WRITE
#pragma language=save
#pragma language=extended
__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val)
{
*(__packed uint16_t*)(ptr) = val;
}
#pragma language=restore
#define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL)
#endif
#ifndef __UNALIGNED_UINT32_READ
#pragma language=save
#pragma language=extended
__IAR_FT uint32_t __iar_uint32_read(void const *ptr)
{
return *(__packed uint32_t*)(ptr);
}
#pragma language=restore
#define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
#pragma language=save
#pragma language=extended
__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val)
{
*(__packed uint32_t*)(ptr) = val;
}
#pragma language=restore
#define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL)
#endif
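/* Illustrative usage (an addition, not part of the original header): these
   helpers allow multi-byte accesses at arbitrary byte offsets, e.g. when
   parsing a packed, little-endian protocol buffer; buf is hypothetical. */
#if 0 /* usage sketch */
  uint8_t  buf[8] = {0};
  uint16_t id = __UNALIGNED_UINT16_READ(&buf[1]);   /* odd offset          */
  __UNALIGNED_UINT32_WRITE(&buf[3], 0x12345678UL);  /* unaligned store     */
#endif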
#if 0
#ifndef __UNALIGNED_UINT32 /* deprecated */
#pragma language=save
#pragma language=extended
__packed struct __iar_u32 { uint32_t v; };
#pragma language=restore
#define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v)
#endif
#endif
#ifndef __USED
#if __ICCARM_V8
#define __USED __attribute__((used))
#else
#define __USED _Pragma("__root")
#endif
#endif
#ifndef __WEAK
#if __ICCARM_V8
#define __WEAK __attribute__((weak))
#else
#define __WEAK _Pragma("__weak")
#endif
#endif
#ifndef __ICCARM_INTRINSICS_VERSION__
#define __ICCARM_INTRINSICS_VERSION__ 0
#endif
#if __ICCARM_INTRINSICS_VERSION__ == 2
#if defined(__CLZ)
#undef __CLZ
#endif
#if defined(__REVSH)
#undef __REVSH
#endif
#if defined(__RBIT)
#undef __RBIT
#endif
#if defined(__SSAT)
#undef __SSAT
#endif
#if defined(__USAT)
#undef __USAT
#endif
#include "iccarm_builtin.h"
#define __enable_irq __iar_builtin_enable_interrupt
#define __disable_irq __iar_builtin_disable_interrupt
#define __enable_fault_irq __iar_builtin_enable_fiq
#define __disable_fault_irq __iar_builtin_disable_fiq
#define __arm_rsr __iar_builtin_rsr
#define __arm_wsr __iar_builtin_wsr
#if __FPU_PRESENT
#define __get_FPSCR() (__arm_rsr("FPSCR"))
#else
#define __get_FPSCR() ( 0 )
#endif
#define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", VALUE))
#define __get_CPSR() (__arm_rsr("CPSR"))
#define __get_mode() (__get_CPSR() & 0x1FU)
#define __set_CPSR(VALUE) (__arm_wsr("CPSR", (VALUE)))
#define __set_mode(VALUE) (__arm_wsr("CPSR_c", (VALUE)))
#define __get_FPEXC() (__arm_rsr("FPEXC"))
#define __set_FPEXC(VALUE) (__arm_wsr("FPEXC", VALUE))
#define __get_CP(cp, op1, RT, CRn, CRm, op2) \
((RT) = __arm_rsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2))
#define __set_CP(cp, op1, RT, CRn, CRm, op2) \
(__arm_wsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2, (RT)))
#define __get_CP64(cp, op1, Rt, CRm) \
__ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) \
__ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
#include "cmsis_cp15.h"
#define __NOP __iar_builtin_no_operation
#define __CLZ __iar_builtin_CLZ
#define __CLREX __iar_builtin_CLREX
#define __DMB __iar_builtin_DMB
#define __DSB __iar_builtin_DSB
#define __ISB __iar_builtin_ISB
#define __LDREXB __iar_builtin_LDREXB
#define __LDREXH __iar_builtin_LDREXH
#define __LDREXW __iar_builtin_LDREX
#define __RBIT __iar_builtin_RBIT
#define __REV __iar_builtin_REV
#define __REV16 __iar_builtin_REV16
__IAR_FT int16_t __REVSH(int16_t val)
{
return (int16_t) __iar_builtin_REVSH(val);
}
#define __ROR __iar_builtin_ROR
#define __RRX __iar_builtin_RRX
#define __SEV __iar_builtin_SEV
#define __SSAT __iar_builtin_SSAT
#define __STREXB __iar_builtin_STREXB
#define __STREXH __iar_builtin_STREXH
#define __STREXW __iar_builtin_STREX
#define __USAT __iar_builtin_USAT
#define __WFE __iar_builtin_WFE
#define __WFI __iar_builtin_WFI
#define __SADD8 __iar_builtin_SADD8
#define __QADD8 __iar_builtin_QADD8
#define __SHADD8 __iar_builtin_SHADD8
#define __UADD8 __iar_builtin_UADD8
#define __UQADD8 __iar_builtin_UQADD8
#define __UHADD8 __iar_builtin_UHADD8
#define __SSUB8 __iar_builtin_SSUB8
#define __QSUB8 __iar_builtin_QSUB8
#define __SHSUB8 __iar_builtin_SHSUB8
#define __USUB8 __iar_builtin_USUB8
#define __UQSUB8 __iar_builtin_UQSUB8
#define __UHSUB8 __iar_builtin_UHSUB8
#define __SADD16 __iar_builtin_SADD16
#define __QADD16 __iar_builtin_QADD16
#define __SHADD16 __iar_builtin_SHADD16
#define __UADD16 __iar_builtin_UADD16
#define __UQADD16 __iar_builtin_UQADD16
#define __UHADD16 __iar_builtin_UHADD16
#define __SSUB16 __iar_builtin_SSUB16
#define __QSUB16 __iar_builtin_QSUB16
#define __SHSUB16 __iar_builtin_SHSUB16
#define __USUB16 __iar_builtin_USUB16
#define __UQSUB16 __iar_builtin_UQSUB16
#define __UHSUB16 __iar_builtin_UHSUB16
#define __SASX __iar_builtin_SASX
#define __QASX __iar_builtin_QASX
#define __SHASX __iar_builtin_SHASX
#define __UASX __iar_builtin_UASX
#define __UQASX __iar_builtin_UQASX
#define __UHASX __iar_builtin_UHASX
#define __SSAX __iar_builtin_SSAX
#define __QSAX __iar_builtin_QSAX
#define __SHSAX __iar_builtin_SHSAX
#define __USAX __iar_builtin_USAX
#define __UQSAX __iar_builtin_UQSAX
#define __UHSAX __iar_builtin_UHSAX
#define __USAD8 __iar_builtin_USAD8
#define __USADA8 __iar_builtin_USADA8
#define __SSAT16 __iar_builtin_SSAT16
#define __USAT16 __iar_builtin_USAT16
#define __UXTB16 __iar_builtin_UXTB16
#define __UXTAB16 __iar_builtin_UXTAB16
#define __SXTB16 __iar_builtin_SXTB16
#define __SXTAB16 __iar_builtin_SXTAB16
#define __SMUAD __iar_builtin_SMUAD
#define __SMUADX __iar_builtin_SMUADX
#define __SMMLA __iar_builtin_SMMLA
#define __SMLAD __iar_builtin_SMLAD
#define __SMLADX __iar_builtin_SMLADX
#define __SMLALD __iar_builtin_SMLALD
#define __SMLALDX __iar_builtin_SMLALDX
#define __SMUSD __iar_builtin_SMUSD
#define __SMUSDX __iar_builtin_SMUSDX
#define __SMLSD __iar_builtin_SMLSD
#define __SMLSDX __iar_builtin_SMLSDX
#define __SMLSLD __iar_builtin_SMLSLD
#define __SMLSLDX __iar_builtin_SMLSLDX
#define __SEL __iar_builtin_SEL
#define __QADD __iar_builtin_QADD
#define __QSUB __iar_builtin_QSUB
#define __PKHBT __iar_builtin_PKHBT
#define __PKHTB __iar_builtin_PKHTB
#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */
#if !__FPU_PRESENT
#define __get_FPSCR __cmsis_iar_get_FPSR_not_active
#endif
#ifdef __INTRINSICS_INCLUDED
#error intrinsics.h has already been included!
#endif
#include <intrinsics.h>
#if !__FPU_PRESENT
#define __get_FPSCR() (0)
#endif
#pragma diag_suppress=Pe940
#pragma diag_suppress=Pe177
#define __enable_irq __enable_interrupt
#define __disable_irq __disable_interrupt
#define __enable_fault_irq __enable_fiq
#define __disable_fault_irq __disable_fiq
#define __NOP __no_operation
#define __get_xPSR __get_PSR
__IAR_FT void __set_mode(uint32_t mode)
{
__ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
}
__IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr)
{
return __LDREX((unsigned long *)ptr);
}
__IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr)
{
return __STREX(value, (unsigned long *)ptr);
}
__IAR_FT uint32_t __RRX(uint32_t value)
{
uint32_t result;
__ASM("RRX %0, %1" : "=r"(result) : "r" (value) : "cc");
return(result);
}
__IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  op2 &= 31U;    /* avoid an undefined shift by 32 when op2 is 0 or a multiple of 32 */
  if (op2 == 0U)
  {
    return op1;
  }
  return (op1 >> op2) | (op1 << (32U - op2));
}
__IAR_FT uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
uint32_t result;
__ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory");
return(result);
#else
return(0);
#endif
}
__IAR_FT void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
__ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
#endif
}
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) \
__ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) \
__ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm) \
__ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) \
__ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
#include "cmsis_cp15.h"
#endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */
#define __BKPT(value) __ASM volatile ("BKPT %0" : : "i"(value))
__IAR_FT uint32_t __get_SP_usr(void)
{
uint32_t cpsr;
uint32_t result;
__ASM volatile(
"MRS %0, cpsr \n"
"CPS #0x1F \n" // no effect in USR mode
"MOV %1, sp \n"
"MSR cpsr_c, %2 \n" // no effect in USR mode
"ISB" : "=r"(cpsr), "=r"(result) : "r"(cpsr) : "memory"
);
return result;
}
__IAR_FT void __set_SP_usr(uint32_t topOfProcStack)
{
uint32_t cpsr;
__ASM volatile(
"MRS %0, cpsr \n"
"CPS #0x1F \n" // no effect in USR mode
"MOV sp, %1 \n"
"MSR cpsr_c, %2 \n" // no effect in USR mode
"ISB" : "=r"(cpsr) : "r" (topOfProcStack), "r"(cpsr) : "memory"
);
}
#define __get_mode() (__get_CPSR() & 0x1FU)
__STATIC_INLINE
void __FPU_Enable(void)
{
__ASM volatile(
//Permit access to VFP/NEON registers by modifying CPACR
" MRC p15,0,R1,c1,c0,2 \n"
" ORR R1,R1,#0x00F00000 \n"
" MCR p15,0,R1,c1,c0,2 \n"
//Ensure that subsequent instructions execute with VFP/NEON access permitted
" ISB \n"
//Enable VFP/NEON
" VMRS R1,FPEXC \n"
" ORR R1,R1,#0x40000000 \n"
" VMSR FPEXC,R1 \n"
//Initialise VFP/NEON registers to 0
" MOV R2,#0 \n"
//Initialise registers D0-D15 to 0
" VMOV D0, R2,R2 \n"
" VMOV D1, R2,R2 \n"
" VMOV D2, R2,R2 \n"
" VMOV D3, R2,R2 \n"
" VMOV D4, R2,R2 \n"
" VMOV D5, R2,R2 \n"
" VMOV D6, R2,R2 \n"
" VMOV D7, R2,R2 \n"
" VMOV D8, R2,R2 \n"
" VMOV D9, R2,R2 \n"
" VMOV D10,R2,R2 \n"
" VMOV D11,R2,R2 \n"
" VMOV D12,R2,R2 \n"
" VMOV D13,R2,R2 \n"
" VMOV D14,R2,R2 \n"
" VMOV D15,R2,R2 \n"
#ifdef __ARM_ADVANCED_SIMD__
//Initialise registers D16-D31 to 0
" VMOV D16,R2,R2 \n"
" VMOV D17,R2,R2 \n"
" VMOV D18,R2,R2 \n"
" VMOV D19,R2,R2 \n"
" VMOV D20,R2,R2 \n"
" VMOV D21,R2,R2 \n"
" VMOV D22,R2,R2 \n"
" VMOV D23,R2,R2 \n"
" VMOV D24,R2,R2 \n"
" VMOV D25,R2,R2 \n"
" VMOV D26,R2,R2 \n"
" VMOV D27,R2,R2 \n"
" VMOV D28,R2,R2 \n"
" VMOV D29,R2,R2 \n"
" VMOV D30,R2,R2 \n"
" VMOV D31,R2,R2 \n"
#endif
//Initialise FPSCR to a known state
" VMRS R2,FPSCR \n"
" MOV32 R3,#0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
" AND R2,R2,R3 \n"
" VMSR FPSCR,R2 \n");
}
#undef __IAR_FT
#undef __ICCARM_V8
#pragma diag_default=Pe940
#pragma diag_default=Pe177
#endif /* __CMSIS_ICCARM_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,186 @@
/**************************************************************************//**
* @file irq_ctrl.h
* @brief Interrupt Controller API header file
* @version V1.0.0
* @date 23. June 2017
******************************************************************************/
/*
* Copyright (c) 2017 ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef IRQ_CTRL_H_
#define IRQ_CTRL_H_
#include <stdint.h>
#ifndef IRQHANDLER_T
#define IRQHANDLER_T
/// Interrupt handler data type
typedef void (*IRQHandler_t) (void);
#endif
#ifndef IRQN_ID_T
#define IRQN_ID_T
/// Interrupt ID number data type
typedef int32_t IRQn_ID_t;
#endif
/* Interrupt mode bit-masks */
#define IRQ_MODE_TRIG_Pos (0U)
#define IRQ_MODE_TRIG_Msk (0x07UL /*<< IRQ_MODE_TRIG_Pos*/)
#define IRQ_MODE_TRIG_LEVEL (0x00UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: level triggered interrupt
#define IRQ_MODE_TRIG_LEVEL_LOW (0x01UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: low level triggered interrupt
#define IRQ_MODE_TRIG_LEVEL_HIGH (0x02UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: high level triggered interrupt
#define IRQ_MODE_TRIG_EDGE (0x04UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: edge triggered interrupt
#define IRQ_MODE_TRIG_EDGE_RISING (0x05UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: rising edge triggered interrupt
#define IRQ_MODE_TRIG_EDGE_FALLING (0x06UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: falling edge triggered interrupt
#define IRQ_MODE_TRIG_EDGE_BOTH (0x07UL /*<< IRQ_MODE_TRIG_Pos*/) ///< Trigger: rising and falling edge triggered interrupt
#define IRQ_MODE_TYPE_Pos (3U)
#define IRQ_MODE_TYPE_Msk (0x01UL << IRQ_MODE_TYPE_Pos)
#define IRQ_MODE_TYPE_IRQ (0x00UL << IRQ_MODE_TYPE_Pos) ///< Type: interrupt source triggers CPU IRQ line
#define IRQ_MODE_TYPE_FIQ (0x01UL << IRQ_MODE_TYPE_Pos) ///< Type: interrupt source triggers CPU FIQ line
#define IRQ_MODE_DOMAIN_Pos (4U)
#define IRQ_MODE_DOMAIN_Msk (0x01UL << IRQ_MODE_DOMAIN_Pos)
#define IRQ_MODE_DOMAIN_NONSECURE (0x00UL << IRQ_MODE_DOMAIN_Pos) ///< Domain: interrupt is targeting non-secure domain
#define IRQ_MODE_DOMAIN_SECURE (0x01UL << IRQ_MODE_DOMAIN_Pos) ///< Domain: interrupt is targeting secure domain
#define IRQ_MODE_CPU_Pos (5U)
#define IRQ_MODE_CPU_Msk (0xFFUL << IRQ_MODE_CPU_Pos)
#define IRQ_MODE_CPU_ALL (0x00UL << IRQ_MODE_CPU_Pos) ///< CPU: interrupt targets all CPUs
#define IRQ_MODE_CPU_0 (0x01UL << IRQ_MODE_CPU_Pos) ///< CPU: interrupt targets CPU 0
#define IRQ_MODE_CPU_1 (0x02UL << IRQ_MODE_CPU_Pos) ///< CPU: interrupt targets CPU 1
#define IRQ_MODE_CPU_2 (0x04UL << IRQ_MODE_CPU_Pos) ///< CPU: interrupt targets CPU 2
#define IRQ_MODE_CPU_3 (0x08UL << IRQ_MODE_CPU_Pos) ///< CPU: interrupt targets CPU 3
#define IRQ_MODE_CPU_4 (0x10UL << IRQ_MODE_CPU_Pos) ///< CPU: interrupt targets CPU 4
#define IRQ_MODE_CPU_5 (0x20UL << IRQ_MODE_CPU_Pos) ///< CPU: interrupt targets CPU 5
#define IRQ_MODE_CPU_6 (0x40UL << IRQ_MODE_CPU_Pos) ///< CPU: interrupt targets CPU 6
#define IRQ_MODE_CPU_7 (0x80UL << IRQ_MODE_CPU_Pos) ///< CPU: interrupt targets CPU 7
#define IRQ_MODE_ERROR (0x80000000UL) ///< Bit indicating mode value error
/* Interrupt priority bit-masks */
#define IRQ_PRIORITY_Msk (0x0000FFFFUL) ///< Interrupt priority value bit-mask
#define IRQ_PRIORITY_ERROR (0x80000000UL) ///< Bit indicating priority value error
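/* Illustrative usage (an addition, not part of the original header): a mode
   word is composed by OR-ing one value from each bit-field defined above. */
#if 0 /* usage sketch */
  uint32_t mode = IRQ_MODE_TRIG_EDGE_RISING   /* trigger field             */
                | IRQ_MODE_TYPE_IRQ           /* route to IRQ, not FIQ     */
                | IRQ_MODE_DOMAIN_NONSECURE   /* security domain           */
                | IRQ_MODE_CPU_0;             /* target CPU 0 only         */
#endif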
/// Initialize interrupt controller.
/// \return 0 on success, -1 on error.
int32_t IRQ_Initialize (void);
/// Register interrupt handler.
/// \param[in] irqn interrupt ID number
/// \param[in] handler interrupt handler function address
/// \return 0 on success, -1 on error.
int32_t IRQ_SetHandler (IRQn_ID_t irqn, IRQHandler_t handler);
/// Get the registered interrupt handler.
/// \param[in] irqn interrupt ID number
/// \return registered interrupt handler function address.
IRQHandler_t IRQ_GetHandler (IRQn_ID_t irqn);
/// Enable interrupt.
/// \param[in] irqn interrupt ID number
/// \return 0 on success, -1 on error.
int32_t IRQ_Enable (IRQn_ID_t irqn);
/// Disable interrupt.
/// \param[in] irqn interrupt ID number
/// \return 0 on success, -1 on error.
int32_t IRQ_Disable (IRQn_ID_t irqn);
/// Get interrupt enable state.
/// \param[in] irqn interrupt ID number
/// \return 0 - interrupt is disabled, 1 - interrupt is enabled.
uint32_t IRQ_GetEnableState (IRQn_ID_t irqn);
/// Configure interrupt request mode.
/// \param[in] irqn interrupt ID number
/// \param[in] mode mode configuration
/// \return 0 on success, -1 on error.
int32_t IRQ_SetMode (IRQn_ID_t irqn, uint32_t mode);
/// Get interrupt mode configuration.
/// \param[in] irqn interrupt ID number
/// \return current interrupt mode configuration with optional IRQ_MODE_ERROR bit set.
uint32_t IRQ_GetMode (IRQn_ID_t irqn);
/// Get ID number of current interrupt request (IRQ).
/// \return interrupt ID number.
IRQn_ID_t IRQ_GetActiveIRQ (void);
/// Get ID number of current fast interrupt request (FIQ).
/// \return interrupt ID number.
IRQn_ID_t IRQ_GetActiveFIQ (void);
/// Signal end of interrupt processing.
/// \param[in] irqn interrupt ID number
/// \return 0 on success, -1 on error.
int32_t IRQ_EndOfInterrupt (IRQn_ID_t irqn);
/// Set interrupt pending flag.
/// \param[in] irqn interrupt ID number
/// \return 0 on success, -1 on error.
int32_t IRQ_SetPending (IRQn_ID_t irqn);
/// Get interrupt pending flag.
/// \param[in] irqn interrupt ID number
/// \return 0 - interrupt is not pending, 1 - interrupt is pending.
uint32_t IRQ_GetPending (IRQn_ID_t irqn);
/// Clear interrupt pending flag.
/// \param[in] irqn interrupt ID number
/// \return 0 on success, -1 on error.
int32_t IRQ_ClearPending (IRQn_ID_t irqn);
/// Set interrupt priority value.
/// \param[in] irqn interrupt ID number
/// \param[in] priority interrupt priority value
/// \return 0 on success, -1 on error.
int32_t IRQ_SetPriority (IRQn_ID_t irqn, uint32_t priority);
/// Get interrupt priority.
/// \param[in] irqn interrupt ID number
/// \return current interrupt priority value with optional IRQ_PRIORITY_ERROR bit set.
uint32_t IRQ_GetPriority (IRQn_ID_t irqn);
/// Set priority masking threshold.
/// \param[in] priority priority masking threshold value
/// \return 0 on success, -1 on error.
int32_t IRQ_SetPriorityMask (uint32_t priority);
/// Get priority masking threshold.
/// \return current priority masking threshold value with optional IRQ_PRIORITY_ERROR bit set.
uint32_t IRQ_GetPriorityMask (void);
/// Set priority grouping field split point.
/// \param[in] bits number of MSB bits included in the group priority field comparison
/// \return 0 on success, -1 on error.
int32_t IRQ_SetPriorityGroupBits (uint32_t bits);
/// Get priority grouping field split point.
/// \return current number of MSB bits included in the group priority field comparison with
/// optional IRQ_PRIORITY_ERROR bit set.
uint32_t IRQ_GetPriorityGroupBits (void);
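/* Illustrative usage (an addition, not part of the original header): a
   typical driver flow with this API. Timer0_IRQn and Timer0_Handler are
   hypothetical names. */
#if 0 /* usage sketch */
  IRQ_Initialize();
  IRQ_SetHandler(Timer0_IRQn, Timer0_Handler);
  IRQ_SetPriority(Timer0_IRQn, 2U);
  IRQ_SetMode(Timer0_IRQn, IRQ_MODE_TRIG_LEVEL_HIGH | IRQ_MODE_TYPE_IRQ);
  IRQ_Enable(Timer0_IRQn);
  /* ... later, in the dispatch code, after the handler returns: */
  IRQ_EndOfInterrupt(Timer0_IRQn);
#endif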
#endif // IRQ_CTRL_H_