Merge pull request #1545 from TomoYamanaka/master

Support for exporting to the IAR toolchain.
pull/1544/merge
Martin Kojtal 2016-02-18 09:42:15 +00:00
commit b57f7d5684
28 changed files with 6409 additions and 26 deletions


@ -334,8 +334,8 @@ void usb1_function_Vendor4(uint16_t type, uint16_t req, uint16_t value, uint
void usb1_function_Vendor5(uint16_t type, uint16_t req, uint16_t value, uint16_t index, uint16_t length);
void usb1_function_ResetDescriptor(uint16_t mode);
uint16_t Userdef_USB_usb1_function_d0fifo_dmaintid(void);
uint16_t Userdef_USB_usb1_function_d1fifo_dmaintid(void);
IRQn_Type Userdef_USB_usb1_function_d0fifo_dmaintid(void);
IRQn_Type Userdef_USB_usb1_function_d1fifo_dmaintid(void);
void Userdef_USB_usb1_function_attach(void);
void Userdef_USB_usb1_function_detach(void);
void Userdef_USB_usb1_function_delay_1ms(void);


@ -140,7 +140,7 @@ void USBEndpoint::queueTransfer()
//Now add this free TD at this end of the queue
state = USB_TYPE_PROCESSING;
td_current->nextTD = td_next;
td_current->nextTD = (hcTd*)td_next;
hced->tailTD = td_next;
}
@ -158,5 +158,5 @@ void USBEndpoint::unqueueTransfer(volatile HCTD * td)
void USBEndpoint::queueEndpoint(USBEndpoint * ed)
{
nextEp = ed;
hced->nextED = (ed == NULL) ? 0 : ed->getHCED();
hced->nextED = (ed == NULL) ? 0 : (hcEd*)(ed->getHCED());
}


@ -304,7 +304,7 @@ void USBHost::transferCompleted(volatile uint32_t addr)
do {
volatile HCTD* td = (volatile HCTD*)addr;
addr = (uint32_t)td->nextTD; //Dequeue from physical list
td->nextTD = tdList; //Enqueue into reversed list
td->nextTD = (hcTd*)tdList; //Enqueue into reversed list
tdList = td;
} while(addr);
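
A minimal sketch, assuming a simplified descriptor type, of what the transferCompleted hunk above does: the host controller hands back completed TDs as a singly linked list, and the loop dequeues each TD from that physical list and pushes it onto a reversed list for processing (hypothetical names, plain C):

/* Sketch only: tdTag and reverse_done_queue are illustrative names. */
typedef struct tdTag {
    struct tdTag *nextTD;
} TD;

static TD *reverse_done_queue(TD *head)
{
    TD *reversed = NULL;
    while (head != NULL) {
        TD *next = head->nextTD;    /* dequeue from the physical list */
        head->nextTD = reversed;    /* enqueue into the reversed list */
        reversed = head;
        head = next;
    }
    return reversed;
}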


@ -136,10 +136,10 @@ enum ENDPOINT_TYPE {
#define CONFIGURATION_DESCRIPTOR_LENGTH 0x09
// ------------ HostController Transfer Descriptor ------------
typedef struct HCTD {
typedef struct hcTd {
__IO uint32_t control; // Transfer descriptor control
__IO uint8_t * currBufPtr; // Physical address of current buffer pointer
__IO HCTD * nextTD; // Physical pointer to next Transfer Descriptor
__IO hcTd * nextTD; // Physical pointer to next Transfer Descriptor
__IO uint8_t * bufEnd; // Physical address of end of buffer
void * ep; // ep address where a td is linked in
uint32_t dummy[3]; // padding


@ -0,0 +1,62 @@
/*###ICF### Section handled by ICF editor, don't touch! ****/
/*-Editor annotation file-*/
/* IcfEditorFile="$TOOLKIT_DIR$\config\ide\IcfEditor\a_v1_0.xml" */
/*-Specials-*/
define symbol __ICFEDIT_intvec_start__ = 0x18004000;
/*-Memory Regions-*/
define symbol __ICFEDIT_region_ROM_start__ = 0x18000000;
define symbol __ICFEDIT_region_ROM_end__ = 0x187FFFFF;
define symbol __ICFEDIT_region_TTB_start__ = 0x20000000;
define symbol __ICFEDIT_region_TTB_end__ = 0x2001FFFF;
define symbol __ICFEDIT_region_RAM_start__ = 0x20020000;
define symbol __ICFEDIT_region_RAM_end__ = 0x209FFFFF;
/*-Sizes-*/
define symbol __ICFEDIT_size_cstack__ = 0x00004000;
define symbol __ICFEDIT_size_svcstack__ = 0x00008000;
define symbol __ICFEDIT_size_irqstack__ = 0x00008000;
define symbol __ICFEDIT_size_fiqstack__ = 0x00000100;
define symbol __ICFEDIT_size_undstack__ = 0x00000100;
define symbol __ICFEDIT_size_abtstack__ = 0x00000100;
define symbol __ICFEDIT_size_heap__ = 0x00080000;
/**** End of ICF editor section. ###ICF###*/
define symbol __ICFEDIT_region_RetRAM_start__ = 0x20000000;
define symbol __ICFEDIT_region_RetRAM_end__ = 0x2001FFFF;
define symbol __ICFEDIT_region_MirrorRAM_start__ = 0x60900000;
define symbol __ICFEDIT_region_MirrorRAM_end__ = 0x609FFFFF;
define symbol __ICFEDIT_region_MirrorRetRAM_start__ = 0x60000000;
define symbol __ICFEDIT_region_MirrorRetRAM_end__ = 0x6001FFFF;
define memory mem with size = 4G;
define region ROM_region = mem:[from __ICFEDIT_region_ROM_start__ to __ICFEDIT_region_ROM_end__];
define region RAM_region = mem:[from __ICFEDIT_region_RAM_start__ to __ICFEDIT_region_RAM_end__];
define region RetRAM_region = mem:[from __ICFEDIT_region_RetRAM_start__ to __ICFEDIT_region_RetRAM_end__];
define region MirrorRAM_region = mem:[from __ICFEDIT_region_MirrorRAM_start__ to __ICFEDIT_region_MirrorRAM_end__];
define region MirrorRetRAM_region = mem:[from __ICFEDIT_region_MirrorRetRAM_start__ to __ICFEDIT_region_MirrorRetRAM_end__];
define block CSTACK with alignment = 8, size = __ICFEDIT_size_cstack__ { };
define block SVC_STACK with alignment = 8, size = __ICFEDIT_size_svcstack__ { };
define block IRQ_STACK with alignment = 8, size = __ICFEDIT_size_irqstack__ { };
define block FIQ_STACK with alignment = 8, size = __ICFEDIT_size_fiqstack__ { };
define block UND_STACK with alignment = 8, size = __ICFEDIT_size_undstack__ { };
define block ABT_STACK with alignment = 8, size = __ICFEDIT_size_abtstack__ { };
define block HEAP with alignment = 8, size = __ICFEDIT_size_heap__ { };
initialize by copy { readwrite };
do not initialize { section .noinit };
do not initialize { section MMU_TT };
place at address mem:__ICFEDIT_intvec_start__ { readonly section .intvec };
place in ROM_region { readonly };
place in RAM_region { readwrite,
block CSTACK, block SVC_STACK, block IRQ_STACK, block FIQ_STACK,
block UND_STACK, block ABT_STACK, block HEAP };
place in RetRAM_region { section .retram };
place in MirrorRAM_region { section .mirrorram };
place in MirrorRetRAM_region { section .mirrorretram };
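Application code places data into the extra regions this file defines using IAR's placement operator, the same mechanism the later hunks in this patch use for .retram and .mirrorram (for example the Ethernet driver's DMA descriptor memory). A minimal sketch under that assumption, with an illustrative variable name:

#if defined(__ICCARM__)
#pragma data_alignment=16
static unsigned char dma_scratch[512] @ ".mirrorram";   /* placed in the mirror RAM region declared above */
#endif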


@ -0,0 +1,505 @@
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Part one of the system initialization code,
;; contains low-level
;; initialization.
;;
;; Copyright 2007 IAR Systems. All rights reserved.
;;
;; $Revision: 49919 $
;;
MODULE ?cstartup
;; Forward declaration of sections.
SECTION SVC_STACK:DATA:NOROOT(3)
SECTION IRQ_STACK:DATA:NOROOT(3)
SECTION ABT_STACK:DATA:NOROOT(3)
SECTION FIQ_STACK:DATA:NOROOT(3)
SECTION UND_STACK:DATA:NOROOT(3)
SECTION CSTACK:DATA:NOROOT(3)
;
; The module in this file are included in the libraries, and may be
; replaced by any user-defined modules that define the PUBLIC symbol
; __iar_program_start or a user defined start symbol.
;
; To override the cstartup defined in the library, simply add your
; modified version to the workbench project.
SECTION .intvec:CODE:NOROOT(2)
PUBLIC __vector
PUBLIC __iar_program_start
PUBLIC Undefined_Handler
EXTERN SWI_Handler
PUBLIC Prefetch_Handler
PUBLIC Abort_Handler
PUBLIC IRQ_Handler
PUBLIC FIQ_Handler
EXTERN VbarInit
EXTERN SetLowVectors
EXTERN init_TTB
EXTERN enable_mmu
EXTERN Peripheral_BasicInit
EXTERN initsct
EXTERN PowerON_Reset
PUBLIC FPUEnable
DATA
__iar_init$$done: ; The vector table is not needed
; until after copy initialization is done
__vector: ; Make this a DATA label, so that stack usage
; analysis doesn't consider it an uncalled function
ARM
; All default exception handlers (except reset) are
; defined as weak symbol definitions.
; If a handler is defined by the application it will take precedence.
LDR PC,Reset_Addr ; Reset
LDR PC,Undefined_Addr ; Undefined instructions
LDR PC,SWI_Addr ; Software interrupt (SWI/SVC)
LDR PC,Prefetch_Addr ; Prefetch abort
LDR PC,Abort_Addr ; Data abort
DCD 0 ; RESERVED
LDR PC,IRQ_Addr ; IRQ
LDR PC,FIQ_Addr ; FIQ
DATA
Reset_Addr: DCD __iar_program_start
Undefined_Addr: DCD Undefined_Handler
SWI_Addr: DCD SWI_Handler
Prefetch_Addr: DCD Prefetch_Handler
Abort_Addr: DCD Abort_Handler
IRQ_Addr: DCD IRQ_Handler
FIQ_Addr: DCD FIQ_Handler
; --------------------------------------------------
; ?cstartup -- low-level system initialization code.
;
; After a reset execution starts here, the mode is ARM, supervisor
; with interrupts disabled.
;
SECTION .text:CODE:NOROOT(2)
EXTERN RZ_A1_SetSramWriteEnable
EXTERN create_translation_table
EXTERN SystemInit
EXTERN InitMemorySubsystem
EXTERN __cmain
REQUIRE __vector
EXTWEAK __iar_init_core
EXTWEAK __iar_init_vfp
ARM
__iar_program_start:
?cstartup:
;;; @ Put any cores other than 0 to sleep
mrc p15, 0, r0, c0, c0, 5 ;;; @ Read MPIDR
ands r0, r0, #3
goToSleep:
wfine
bne goToSleep
//@ Enable access to NEON/VFP by enabling access to Coprocessors 10 and 11.
//@ Enables Full Access i.e. in both privileged and non privileged modes
mrc p15, 0, r0, c1, c0, 2 ;@ Read Coprocessor Access Control Register (CPACR)
orr r0, r0, #(0xF << 20) ;@ Enable access to CP 10 & 11
mcr p15, 0, r0, c1, c0, 2 ;@ Write Coprocessor Access Control Register (CPACR)
isb
;; Switch on the VFP and NEON hardware
mov r0, #0x40000000
vmsr fpexc, r0 ;@ Write FPEXC register, EN bit set
mrc p15, 0, r0, c1, c0, 0 ;@ Read CP15 System Control register
bic r0, r0, #(0x1 << 12) ;@ Clear I bit 12 to disable I Cache
bic r0, r0, #(0x1 << 2) ;@ Clear C bit 2 to disable D Cache
bic r0, r0, #0x1 ;@ Clear M bit 0 to disable MMU
bic r0, r0, #(0x1 << 11) ;@ Clear Z bit 11 to disable branch prediction
bic r0, r0, #(0x1 << 13) ;@ Clear V bit 13 to disable hivecs
mcr p15, 0, r0, c1, c0, 0 ;@ Write value back to CP15 System Control register
isb
;; Set Vector Base Address Register (VBAR) to point to this application's vector table
ldr r0, =__vector
mcr p15, 0, r0, c12, c0, 0
;
; Add initialization needed before setup of stackpointers here.
;
;
; Initialize the stack pointers.
; The pattern below can be used for any of the exception stacks:
; FIQ, IRQ, SVC, ABT, UND, SYS.
; The USR mode uses the same stack as SYS.
; The stack segments must be defined in the linker command file,
; and be declared above.
;
; --------------------
; Mode, corresponds to bits 0-4 in CPSR
#define MODE_MSK 0x1F ; Bit mask for mode bits in CPSR
#define USR_MODE 0x10 ; User mode
#define FIQ_MODE 0x11 ; Fast Interrupt Request mode
#define IRQ_MODE 0x12 ; Interrupt Request mode
#define SVC_MODE 0x13 ; Supervisor mode
#define ABT_MODE 0x17 ; Abort mode
#define UND_MODE 0x1B ; Undefined Instruction mode
#define SYS_MODE 0x1F ; System mode
#define Mode_SVC 0x13
#define Mode_ABT 0x17
#define Mode_UND 0x1B
#define GICI_BASE 0xe8202000
#define ICCIAR_OFFSET 0x0000000C
#define ICCEOIR_OFFSET 0x00000010
#define ICCHPIR_OFFSET 0x00000018
#define GICD_BASE 0xe8201000
#define GIC_ERRATA_CHECK_1 0x000003FE
#define GIC_ERRATA_CHECK_2 0x000003FF
#define ICDABR0_OFFSET 0x00000300
#define ICDIPR0_OFFSET 0x00000400
#define T_Bit 0x20 ; when T bit is set, core is in Thumb state
MRS r0, cpsr ; Original PSR value
;; Set up the SVC stack pointer.
BIC r0, r0, #MODE_MSK ; Clear the mode bits
ORR r0, r0, #SVC_MODE ; Set SVC mode bits
MSR cpsr_c, r0 ; Change the mode
LDR sp, =SFE(SVC_STACK) ; End of SVC_STACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;; Set up the interrupt stack pointer.
BIC r0, r0, #MODE_MSK ; Clear the mode bits
ORR r0, r0, #IRQ_MODE ; Set IRQ mode bits
MSR cpsr_c, r0 ; Change the mode
LDR sp, =SFE(IRQ_STACK) ; End of IRQ_STACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;; Set up the fast interrupt stack pointer.
BIC r0, r0, #MODE_MSK ; Clear the mode bits
ORR r0, r0, #FIQ_MODE ; Set FIQ mode bits
MSR cpsr_c, r0 ; Change the mode
LDR sp, =SFE(FIQ_STACK) ; End of FIQ_STACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;; Set up the ABT stack pointer.
BIC r0 ,r0, #MODE_MSK ; Clear the mode bits
ORR r0 ,r0, #ABT_MODE ; Set ABT mode bits
MSR cpsr_c, r0 ; Change the mode
LDR sp, =SFE(ABT_STACK) ; End of ABT_STACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;; Set up the UND stack pointer.
BIC r0 ,r0, #MODE_MSK ; Clear the mode bits
ORR r0 ,r0, #UND_MODE ; Set UND mode bits
MSR cpsr_c, r0 ; Change the mode
LDR sp, =SFE(UND_STACK) ; End of UND_STACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;; Set up the normal stack pointer.
BIC r0 ,r0, #MODE_MSK ; Clear the mode bits
ORR r0 ,r0, #SYS_MODE ; Set System mode bits
MSR cpsr_c, r0 ; Change the mode
LDR sp, =SFE(CSTACK) ; End of CSTACK
BIC sp,sp,#0x7 ; Make sure SP is 8 aligned
;;;
isb
ldr r0, =RZ_A1_SetSramWriteEnable
blx r0
bl create_translation_table
; USR/SYS stack pointer will be set during kernel init
ldr r0, =SystemInit
blx r0
ldr r0, =InitMemorySubsystem
blx r0
; fp_init
mov r0, #0x3000000
vmsr fpscr, r0
;;; Continue to __cmain for C-level initialization.
FUNCALL __iar_program_start, __cmain
B __cmain
ldr r0, sf_boot ;@ dummy to keep boot loader area
loop_here:
b loop_here
sf_boot:
DC32 0x00000001
Undefined_Handler:
EXTERN CUndefHandler
SRSDB SP!, #Mode_UND
PUSH {R0-R4, R12} /* Save APCS corruptible registers to UND mode stack */
MRS R0, SPSR
TST R0, #T_Bit /* Check mode */
MOVEQ R1, #4 /* R1 = 4 ARM mode */
MOVNE R1, #2 /* R1 = 2 Thumb mode */
SUB R0, LR, R1
LDREQ R0, [R0] /* ARM mode - R0 points to offending instruction */
BEQ undef_cont
/* Thumb instruction */
/* Determine if it is a 32-bit Thumb instruction */
LDRH R0, [R0]
MOV R2, #0x1c
CMP R2, R0, LSR #11
BHS undef_cont /* 16-bit Thumb instruction */
/* 32-bit Thumb instruction. Unaligned - we need to reconstruct the offending instruction. */
LDRH R2, [LR]
ORR R0, R2, R0, LSL #16
undef_cont:
MOV R2, LR /* Set LR to third argument */
/* AND R12, SP, #4 */ /* Ensure stack is 8-byte aligned */
MOV R3, SP /* Ensure stack is 8-byte aligned */
AND R12, R3, #4
SUB SP, SP, R12 /* Adjust stack */
PUSH {R12, LR} /* Store stack adjustment and dummy LR */
/* R0 Offending instruction */
/* R1 =2 (Thumb) or =4 (ARM) */
BL CUndefHandler
POP {R12, LR} /* Get stack adjustment & discard dummy LR */
ADD SP, SP, R12 /* Unadjust stack */
LDR LR, [SP, #24] /* Restore stacked LR and possibly adjust for retry */
SUB LR, LR, R0
LDR R0, [SP, #28] /* Restore stacked SPSR */
MSR SPSR_cxsf, R0
POP {R0-R4, R12} /* Restore stacked APCS registers */
ADD SP, SP, #8 /* Adjust SP for already-restored banked registers */
MOVS PC, LR
Prefetch_Handler:
EXTERN CPAbtHandler
SUB LR, LR, #4 /* Pre-adjust LR */
SRSDB SP!, #Mode_ABT /* Save LR and SPSR to ABT mode stack */
PUSH {R0-R4, R12} /* Save APCS corruptible registers to ABT mode stack */
MRC p15, 0, R0, c5, c0, 1 /* IFSR */
MRC p15, 0, R1, c6, c0, 2 /* IFAR */
MOV R2, LR /* Set LR to third argument */
/* AND R12, SP, #4 */ /* Ensure stack is 8-byte aligned */
MOV R3, SP /* Ensure stack is 8-byte aligned */
AND R12, R3, #4
SUB SP, SP, R12 /* Adjust stack */
PUSH {R12, LR} /* Store stack adjustment and dummy LR */
BL CPAbtHandler
POP {R12, LR} /* Get stack adjustment & discard dummy LR */
ADD SP, SP, R12 /* Unadjust stack */
POP {R0-R4, R12} /* Restore stacked APCS registers */
RFEFD SP! /* Return from exception */
Abort_Handler:
EXTERN CDAbtHandler
SUB LR, LR, #8 /* Pre-adjust LR */
SRSDB SP!, #Mode_ABT /* Save LR and SPSR to ABT mode stack */
PUSH {R0-R4, R12} /* Save APCS corruptible registers to ABT mode stack */
CLREX /* State of exclusive monitors unknown after taken data abort */
MRC p15, 0, R0, c5, c0, 0 /* DFSR */
MRC p15, 0, R1, c6, c0, 0 /* DFAR */
MOV R2, LR /* Set LR to third argument */
/* AND R12, SP, #4 */ /* Ensure stack is 8-byte aligned */
MOV R3, SP /* Ensure stack is 8-byte aligned */
AND R12, R3, #4
SUB SP, SP, R12 /* Adjust stack */
PUSH {R12, LR} /* Store stack adjustment and dummy LR */
BL CDAbtHandler
POP {R12, LR} /* Get stack adjustment & discard dummy LR */
ADD SP, SP, R12 /* Unadjust stack */
POP {R0-R4, R12} /* Restore stacked APCS registers */
RFEFD SP! /* Return from exception */
FIQ_Handler:
/* An FIQ might occur between the dummy read and the real read of the GIC in IRQ_Handler,
* so if a real FIQ Handler is implemented, this will be needed before returning:
*/
/* LDR R1, =GICI_BASE
LDR R0, [R1, #ICCHPIR_OFFSET] ; Dummy Read ICCHPIR (GIC CPU Interface register) to avoid GIC 390 errata 801120
*/
B .
EXTERN SVC_Handler /* refer RTX function */
IRQ_Handler:
EXTERN IRQCount
EXTERN IRQTable
EXTERN IRQNestLevel
/* prologue */
SUB LR, LR, #4 /* Pre-adjust LR */
SRSDB SP!, #Mode_SVC /* Save LR_IRQ and SPSR_IRQ to SVC mode stack */
CPS #Mode_SVC /* Switch to SVC mode, to avoid a nested interrupt corrupting LR on a BL */
PUSH {R0-R3, R12} /* Save remaining APCS corruptible registers to SVC stack */
/* AND R1, SP, #4 */ /* Ensure stack is 8-byte aligned */
MOV R3, SP /* Ensure stack is 8-byte aligned */
AND R1, R3, #4
SUB SP, SP, R1 /* Adjust stack */
PUSH {R1, LR} /* Store stack adjustment and LR_SVC to SVC stack */
LDR R0, =IRQNestLevel /* Get address of nesting counter */
LDR R1, [R0]
ADD R1, R1, #1 /* Increment nesting counter */
STR R1, [R0]
/* identify and acknowledge interrupt */
LDR R1, =GICI_BASE
LDR R0, [R1, #ICCHPIR_OFFSET] /* Dummy Read ICCHPIR (GIC CPU Interface register) to avoid GIC 390 errata 801120 */
LDR R0, [R1, #ICCIAR_OFFSET] /* Read ICCIAR (GIC CPU Interface register) */
DSB /* Ensure that interrupt acknowledge completes before re-enabling interrupts */
/* Workaround GIC 390 errata 733075
* If the ID is not 0, then service the interrupt as normal.
* If the ID is 0 and active, then service interrupt ID 0 as normal.
* If the ID is 0 but not active, then the GIC CPU interface may be locked-up, so unlock it
* with a dummy write to ICDIPR0. This interrupt should be treated as spurious and not serviced.
*/
LDR R2, =GICD_BASE
LDR R3, =GIC_ERRATA_CHECK_1
CMP R0, R3
BEQ unlock_cpu
LDR R3, =GIC_ERRATA_CHECK_2
CMP R0, R3
BEQ unlock_cpu
CMP R0, #0
BNE int_active /* If the ID is not 0, then service the interrupt */
LDR R3, [R2, #ICDABR0_OFFSET] /* Get the interrupt state */
TST R3, #1
BNE int_active /* If active, then service the interrupt */
unlock_cpu:
LDR R3, [R2, #ICDIPR0_OFFSET] /* Not active, so unlock the CPU interface */
STR R3, [R2, #ICDIPR0_OFFSET] /* with a dummy write */
DSB /* Ensure the write completes before continuing */
B ret_irq /* Do not service the spurious interrupt */
/* End workaround */
int_active:
LDR R2, =IRQCount /* Read number of IRQs */
LDR R2, [R2]
CMP R0, R2 /* Clean up and return if no handler */
BHS ret_irq /* In a single-processor system, spurious interrupt ID 1023 does not need any special handling */
LDR R2, =IRQTable /* Get address of handler */
LDR R2, [R2, R0, LSL #2]
CMP R2, #0 /* Clean up and return if handler address is 0 */
BEQ ret_irq
PUSH {R0,R1}
CPSIE i /* Now safe to re-enable interrupts */
BLX R2 /* Call handler. R0 will be IRQ number */
CPSID i /* Disable interrupts again */
/* write EOIR (GIC CPU Interface register) */
POP {R0,R1}
DSB /* Ensure that interrupt source is cleared before we write the EOIR */
ret_irq:
/* epilogue */
STR R0, [R1, #ICCEOIR_OFFSET]
LDR R0, =IRQNestLevel /* Get address of nesting counter */
LDR R1, [R0]
SUB R1, R1, #1 /* Decrement nesting counter */
STR R1, [R0]
POP {R1, LR} /* Get stack adjustment and restore LR_SVC */
ADD SP, SP, R1 /* Unadjust stack */
POP {R0-R3,R12} /* Restore stacked APCS registers */
RFEFD SP! /* Return from exception */
;;;
;;; Add more initialization here
;;;
FPUEnable:
ARM
//Permit access to VFP registers by modifying CPACR
MRC p15,0,R1,c1,c0,2
ORR R1,R1,#0x00F00000
MCR p15,0,R1,c1,c0,2
//Enable VFP
VMRS R1,FPEXC
ORR R1,R1,#0x40000000
VMSR FPEXC,R1
//Initialise VFP registers to 0
MOV R2,#0
VMOV D0, R2,R2
VMOV D1, R2,R2
VMOV D2, R2,R2
VMOV D3, R2,R2
VMOV D4, R2,R2
VMOV D5, R2,R2
VMOV D6, R2,R2
VMOV D7, R2,R2
VMOV D8, R2,R2
VMOV D9, R2,R2
VMOV D10,R2,R2
VMOV D11,R2,R2
VMOV D12,R2,R2
VMOV D13,R2,R2
VMOV D14,R2,R2
VMOV D15,R2,R2
//Initialise FPSCR to a known state
VMRS R2,FPSCR
LDR R3,=0x00086060 //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
AND R2,R2,R3
VMSR FPSCR,R2
BX LR
END


@ -30,6 +30,8 @@
#pragma arm section rodata = "BOOT_LOADER"
const char boot_loader[] __attribute__((used)) =
#elif defined (__ICCARM__)
__root const char boot_loader[] @ 0x18000000 =
#else
const char boot_loader[] __attribute__ ((section(".boot_loader"), used)) =


@ -71,8 +71,12 @@ extern uint32_t Image$$VECTORS$$Base;
extern uint32_t Image$$RO_DATA$$Base;
extern uint32_t Image$$RW_DATA$$Base;
extern uint32_t Image$$ZI_DATA$$Base;
#if !defined ( __ICCARM__ )
extern uint32_t Image$$TTB$$ZI$$Base;
#endif
#if defined( __CC_ARM )
#elif defined( __ICCARM__ )
#else
extern uint32_t Image$$RW_DATA_NC$$Base;
extern uint32_t Image$$ZI_DATA_NC$$Base;
@ -88,10 +92,18 @@ extern uint32_t Image$$RW_DATA_NC$$Limit;
extern uint32_t Image$$ZI_DATA_NC$$Limit;
#endif
#if defined( __ICCARM__ )
#define VECTORS_SIZE (((uint32_t)Image$$VECTORS$$Limit >> 20) - ((uint32_t)Image$$VECTORS$$Base >> 20) + 1)
#define RO_DATA_SIZE (((uint32_t)Image$$RO_DATA$$Limit >> 20) - ((uint32_t)Image$$RO_DATA$$Base >> 20) + 1)
#define RW_DATA_SIZE (((uint32_t)Image$$RW_DATA$$Limit >> 20) - ((uint32_t)Image$$RW_DATA$$Base >> 20) + 1)
#define ZI_DATA_SIZE (((uint32_t)Image$$ZI_DATA$$Limit >> 20) - ((uint32_t)Image$$ZI_DATA$$Base >> 20) + 1)
#else
#define VECTORS_SIZE (((uint32_t)&Image$$VECTORS$$Limit >> 20) - ((uint32_t)&Image$$VECTORS$$Base >> 20) + 1)
#define RO_DATA_SIZE (((uint32_t)&Image$$RO_DATA$$Limit >> 20) - ((uint32_t)&Image$$RO_DATA$$Base >> 20) + 1)
#define RW_DATA_SIZE (((uint32_t)&Image$$RW_DATA$$Limit >> 20) - ((uint32_t)&Image$$RW_DATA$$Base >> 20) + 1)
#define ZI_DATA_SIZE (((uint32_t)&Image$$ZI_DATA$$Limit >> 20) - ((uint32_t)&Image$$ZI_DATA$$Base >> 20) + 1)
#endif
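/* Worked example (hypothetical addresses): these macros count 1 MB MMU
 * sections, so a region running from 0x20020000 to 0x2041FFFF spans
 * (0x204 - 0x200) + 1 = 5 sections. */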
#if defined( __CC_ARM )
#else
#define RW_DATA_NC_SIZE (((uint32_t)&Image$$RW_DATA_NC$$Limit >> 20) - ((uint32_t)&Image$$RW_DATA_NC$$Base >> 20) + 1)
@ -112,10 +124,37 @@ static uint32_t Page_L1_64k = 0x0; //generic
static uint32_t Page_4k_Device_RW; //Shared device, not executable, rw, domain 0
static uint32_t Page_64k_Device_RW; //Shared device, not executable, rw, domain 0
#if defined ( __ICCARM__ )
__no_init uint32_t Image$$TTB$$ZI$$Base @ ".retram";
uint32_t Image$$VECTORS$$Base;
uint32_t Image$$RO_DATA$$Base;
uint32_t Image$$RW_DATA$$Base;
uint32_t Image$$ZI_DATA$$Base;
uint32_t Image$$VECTORS$$Limit;
uint32_t Image$$RO_DATA$$Limit;
uint32_t Image$$RW_DATA$$Limit;
uint32_t Image$$ZI_DATA$$Limit;
#endif
void create_translation_table(void)
{
mmu_region_attributes_Type region;
#if defined ( __ICCARM__ )
#pragma section=".intvec"
#pragma section=".rodata"
#pragma section=".rwdata"
#pragma section=".bss"
Image$$VECTORS$$Base = (uint32_t) __section_begin(".intvec");
Image$$VECTORS$$Limit= ((uint32_t)__section_begin(".intvec")+(uint32_t)__section_size(".intvec"));
Image$$RO_DATA$$Base = (uint32_t) __section_begin(".rodata");
Image$$RO_DATA$$Limit= ((uint32_t)__section_begin(".rodata")+(uint32_t)__section_size(".rodata"));
Image$$RW_DATA$$Base = (uint32_t) __section_begin(".rwdata");
Image$$RW_DATA$$Limit= ((uint32_t)__section_begin(".rwdata")+(uint32_t)__section_size(".rwdata"));
Image$$ZI_DATA$$Base = (uint32_t) __section_begin(".bss");
Image$$ZI_DATA$$Limit= ((uint32_t)__section_begin(".bss")+(uint32_t)__section_size(".bss"));
#endif
/*
* Generate descriptors. Refer to MBRZA1H.h to get information about attributes
*
@ -157,13 +196,25 @@ void create_translation_table(void)
__TTSection (&Image$$TTB$$ZI$$Base, Renesas_RZ_A1_PERIPH_BASE0 , 3, Sect_Device_RW);
__TTSection (&Image$$TTB$$ZI$$Base, Renesas_RZ_A1_PERIPH_BASE1 , 49, Sect_Device_RW);
#if defined( __ICCARM__ )
//Define Image
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$RO_DATA$$Base, RO_DATA_SIZE, Sect_Normal_RO);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$VECTORS$$Base, VECTORS_SIZE, Sect_Normal_Cod);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$RW_DATA$$Base, RW_DATA_SIZE, Sect_Normal_RW);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)Image$$ZI_DATA$$Base, ZI_DATA_SIZE, Sect_Normal_RW);
#else
//Define Image
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RO_DATA$$Base, RO_DATA_SIZE, Sect_Normal_RO);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$VECTORS$$Base, VECTORS_SIZE, Sect_Normal_Cod);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA$$Base, RW_DATA_SIZE, Sect_Normal_RW);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$ZI_DATA$$Base, ZI_DATA_SIZE, Sect_Normal_RW);
#endif
#if defined( __CC_ARM )
__TTSection (&Image$$TTB$$ZI$$Base, Renesas_RZ_A1_ONCHIP_SRAM_NC_BASE, 10, Sect_Normal_NC);
#elif defined ( __ICCARM__ )
__TTSection (&Image$$TTB$$ZI$$Base, Renesas_RZ_A1_ONCHIP_SRAM_NC_BASE, 10, Sect_Normal_NC);
#else
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA_NC$$Base, RW_DATA_NC_SIZE, Sect_Normal_NC);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$ZI_DATA_NC$$Base, ZI_DATA_NC_SIZE, Sect_Normal_NC);


@ -133,6 +133,40 @@ void InitMemorySubsystem(void) {
PL310_Enable();
}
}
#elif defined ( __ICCARM__ )
void InitMemorySubsystem(void) {
/* This SVC is specific for reset where data / tlb / btac may contain undefined data, therefore before
* enabling the cache you must invalidate the instruction cache, the data cache, TLB, and BTAC.
* You are not required to invalidate the main TLB, even though it is recommended for safety
* reasons. This ensures compatibility with future revisions of the processor. */
unsigned int l2_id;
/* Invalidate undefined data */
__ca9u_inv_tlb_all();
__v7_inv_icache_all();
__v7_inv_dcache_all();
__v7_inv_btac();
/* Don't use this function during runtime since caches may contain valid data. For a correct cache maintenance you may need to execute a clean and
* invalidate in order to flush the valid data to the next level cache.
*/
__enable_mmu();
/* After MMU is enabled and data has been invalidated, enable caches and BTAC */
__enable_caches();
__enable_btac();
/* If present, you may also need to Invalidate and Enable L2 cache here */
l2_id = PL310_GetID();
if (l2_id)
{
PL310_InvAllByWay();
PL310_Enable();
}
}
#else
#endif


@ -0,0 +1,97 @@
/* Copyright (c) 2009 - 2012 ARM LIMITED
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of ARM nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
* Functions
*---------------------------------------------------------------------------*/
SECTION `.text`:CODE:NOROOT(2)
arm
PUBLIC __v7_all_cache
/*
* __STATIC_ASM void __v7_all_cache(uint32_t op) {
*/
__v7_all_cache:
PUSH {R4-R11}
MRC p15, 1, R6, c0, c0, 1 /* Read CLIDR */
ANDS R3, R6, #0x07000000 /* Extract coherency level */
MOV R3, R3, LSR #23 /* Total cache levels << 1 */
BEQ Finished /* If 0, no need to clean */
MOV R10, #0 /* R10 holds current cache level << 1 */
Loop1: ADD R2, R10, R10, LSR #1 /* R2 holds cache "Set" position */
MOV R1, R6, LSR R2 /* Bottom 3 bits are the Cache-type for this level */
AND R1, R1, #7 /* Isolate those lower 3 bits */
CMP R1, #2
BLT Skip /* No cache or only instruction cache at this level */
MCR p15, 2, R10, c0, c0, 0 /* Write the Cache Size selection register */
ISB /* ISB to sync the change to the CacheSizeID reg */
MRC p15, 1, R1, c0, c0, 0 /* Reads current Cache Size ID register */
AND R2, R1, #7 /* Extract the line length field */
ADD R2, R2, #4 /* Add 4 for the line length offset (log2 16 bytes) */
LDR R4, =0x3FF
ANDS R4, R4, R1, LSR #3 /* R4 is the max number on the way size (right aligned) */
CLZ R5, R4 /* R5 is the bit position of the way size increment */
LDR R7, =0x7FFF
ANDS R7, R7, R1, LSR #13 /* R7 is the max number of the index size (right aligned) */
Loop2: MOV R9, R4 /* R9 working copy of the max way size (right aligned) */
Loop3: ORR R11, R10, R9, LSL R5 /* Factor in the Way number and cache number into R11 */
ORR R11, R11, R7, LSL R2 /* Factor in the Set number */
CMP R0, #0
BNE Dccsw
MCR p15, 0, R11, c7, c6, 2 /* DCISW. Invalidate by Set/Way */
B cont
Dccsw: CMP R0, #1
BNE Dccisw
MCR p15, 0, R11, c7, c10, 2 /* DCCSW. Clean by Set/Way */
B cont
Dccisw: MCR p15, 0, R11, c7, c14, 2 /* DCCISW, Clean and Invalidate by Set/Way */
cont: SUBS R9, R9, #1 /* Decrement the Way number */
BGE Loop3
SUBS R7, R7, #1 /* Decrement the Set number */
BGE Loop2
Skip: ADD R10, R10, #2 /* increment the cache number */
CMP R3, R10
BGT Loop1
Finished:
DSB
POP {R4-R11}
BX lr
END
/*----------------------------------------------------------------------------
* end of file
*---------------------------------------------------------------------------*/


@ -89,6 +89,11 @@
#define __STATIC_INLINE static inline
#define __STATIC_ASM static __asm
#include <stdint.h>
inline uint32_t __get_PSR(void) {
__ASM("mrs r0, cpsr");
}
#elif defined ( __TMS470__ )
#define __ASM __asm /*!< asm keyword for TI CCS Compiler */
#define __STATIC_INLINE static inline


@ -570,7 +570,248 @@ __STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
#elif (defined (__ICCARM__)) /*---------------- ICC Compiler ---------------------*/
#error IAR Compiler support not implemented for Cortex-A
#define __inline inline
inline static uint32_t __disable_irq_iar() {
int irq_dis = __get_CPSR() & 0x80; // 7bit CPSR.I
__disable_irq();
return irq_dis;
}
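/* Usage sketch (not part of this patch): the serial and ticker hunks later in
 * this changeset pair __disable_irq_iar() with __enable_irq(), restoring the
 * interrupt state only if interrupts were enabled on entry.  'reg' is a
 * placeholder name. */
static inline void critical_write_example(volatile uint32_t *reg, uint32_t val) {
    int was_masked = __disable_irq_iar();
    *reg = val;                       /* protected access                  */
    if (!was_masked) {
        __enable_irq();               /* only re-enable if we masked here  */
    }
}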
#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_MON 0x16
#define MODE_ABT 0x17
#define MODE_HYP 0x1A
#define MODE_UND 0x1B
#define MODE_SYS 0x1F
/** \brief Set Process Stack Pointer
This function assigns the given value to the USR/SYS Stack Pointer (PSP).
\param [in] topOfProcStack USR/SYS Stack Pointer value to set
*/
// from rt_CMSIS.c
__arm static inline void __set_PSP(uint32_t topOfProcStack) {
__asm(
" ARM\n"
// " PRESERVE8\n"
" BIC R0, R0, #7 ;ensure stack is 8-byte aligned \n"
" MRS R1, CPSR \n"
" CPS #0x1F ;no effect in USR mode \n" // MODE_SYS
" MOV SP, R0 \n"
" MSR CPSR_c, R1 ;no effect in USR mode \n"
" ISB \n"
" BX LR \n");
}
/** \brief Set User Mode
This function changes the processor state to User Mode
*/
// from rt_CMSIS.c
__arm static inline void __set_CPS_USR(void) {
__asm(
" ARM \n"
" CPS #0x10 \n" // MODE_USR
" BX LR\n");
}
/** \brief Set TTBR0
This function assigns the given value to the Translation Table Base Register 0.
\param [in] ttbr0 Translation Table Base Register 0 value to set
*/
// from mmu_Renesas_RZ_A1.c
__STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
__MCR(15, 0, ttbr0, 2, 0, 0); // reg to cp15
__ISB();
}
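/* Hedged sketch of a read-side counterpart (not in the original patch),
 * following the same __MRC pattern used by __get_SCTLR() further down;
 * TTBR0 is CP15 c2, c0, 0. */
__STATIC_INLINE uint32_t __get_TTBR0(void) {
    return __MRC(15, 0, 2, 0, 0);
}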
/** \brief Set DACR
This function assigns the given value to the Domain Access Control Register.
\param [in] dacr Domain Access Control Register value to set
*/
// from mmu_Renesas_RZ_A1.c
__STATIC_INLINE void __set_DACR(uint32_t dacr) {
__MCR(15, 0, dacr, 3, 0, 0); // reg to cp15
__ISB();
}
/******************************** Cache and BTAC enable ****************************************************/
/** \brief Set SCTLR
This function assigns the given value to the System Control Register.
\param [in] sctlr System Control Register value to set
*/
// from __enable_mmu()
__STATIC_INLINE void __set_SCTLR(uint32_t sctlr) {
__MCR(15, 0, sctlr, 1, 0, 0); // reg to cp15
}
/** \brief Get SCTLR
This function returns the value of the System Control Register.
\return System Control Register value
*/
// from __enable_mmu()
__STATIC_INLINE uint32_t __get_SCTLR() {
uint32_t __regSCTLR = __MRC(15, 0, 1, 0, 0);
return __regSCTLR;
}
/** \brief Enable Caches
Enable Caches
*/
// from system_Renesas_RZ_A1.c
__STATIC_INLINE void __enable_caches(void) {
__set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
}
/** \brief Enable BTAC
Enable BTAC
*/
// from system_Renesas_RZ_A1.c
__STATIC_INLINE void __enable_btac(void) {
__set_SCTLR( __get_SCTLR() | (1 << 11));
__ISB();
}
/** \brief Enable MMU
Enable MMU
*/
// from system_Renesas_RZ_A1.c
__STATIC_INLINE void __enable_mmu(void) {
// Set M bit 0 to enable the MMU
// Set AFE bit to enable simplified access permissions model
// Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
__set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
__ISB();
}
/******************************** TLB maintenance operations ************************************************/
/** \brief Invalidate the whole tlb
TLBIALL. Invalidate the whole tlb
*/
// from system_Renesas_RZ_A1.c
__STATIC_INLINE void __ca9u_inv_tlb_all(void) {
uint32_t val = 0;
__MCR(15, 0, val, 8, 7, 0); // reg to cp15
__MCR(15, 0, val, 8, 6, 0); // reg to cp15
__MCR(15, 0, val, 8, 5, 0); // reg to cp15
__DSB();
__ISB();
}
/******************************** BTB maintenance operations ************************************************/
/** \brief Invalidate entire branch predictor array
BPIALL. Branch Predictor Invalidate All.
*/
// from system_Renesas_RZ_A1.c
__STATIC_INLINE void __v7_inv_btac(void) {
uint32_t val = 0;
__MCR(15, 0, val, 7, 5, 6); // reg to cp15
__DSB(); //ensure completion of the invalidation
__ISB(); //ensure instruction fetch path sees new state
}
/******************************** L1 cache operations ******************************************************/
/** \brief Invalidate the whole I$
ICIALLU. Instruction Cache Invalidate All to PoU
*/
// from system_Renesas_RZ_A1.c
__STATIC_INLINE void __v7_inv_icache_all(void) {
uint32_t val = 0;
__MCR(15, 0, val, 7, 5, 0); // reg to cp15
__DSB(); //ensure completion of the invalidation
__ISB(); //ensure instruction fetch path sees new I cache state
}
// from __v7_inv_dcache_all()
__arm static inline void __v7_all_cache(uint32_t op) {
__asm(
" ARM \n"
" PUSH {R4-R11} \n"
" MRC p15, 1, R6, c0, c0, 1\n" // Read CLIDR
" ANDS R3, R6, #0x07000000\n" // Extract coherency level
" MOV R3, R3, LSR #23\n" // Total cache levels << 1
" BEQ Finished\n" // If 0, no need to clean
" MOV R10, #0\n" // R10 holds current cache level << 1
"Loop1: ADD R2, R10, R10, LSR #1\n" // R2 holds cache "Set" position
" MOV R1, R6, LSR R2 \n" // Bottom 3 bits are the Cache-type for this level
" AND R1, R1, #7 \n" // Isolate those lower 3 bits
" CMP R1, #2 \n"
" BLT Skip \n" // No cache or only instruction cache at this level
" MCR p15, 2, R10, c0, c0, 0 \n" // Write the Cache Size selection register
" ISB \n" // ISB to sync the change to the CacheSizeID reg
" MRC p15, 1, R1, c0, c0, 0 \n" // Reads current Cache Size ID register
" AND R2, R1, #7 \n" // Extract the line length field
" ADD R2, R2, #4 \n" // Add 4 for the line length offset (log2 16 bytes)
" movw R4, #0x3FF \n"
" ANDS R4, R4, R1, LSR #3 \n" // R4 is the max number on the way size (right aligned)
" CLZ R5, R4 \n" // R5 is the bit position of the way size increment
" movw R7, #0x7FFF \n"
" ANDS R7, R7, R1, LSR #13 \n" // R7 is the max number of the index size (right aligned)
"Loop2: MOV R9, R4 \n" // R9 working copy of the max way size (right aligned)
"Loop3: ORR R11, R10, R9, LSL R5 \n" // Factor in the Way number and cache number into R11
" ORR R11, R11, R7, LSL R2 \n" // Factor in the Set number
" CMP R0, #0 \n"
" BNE Dccsw \n"
" MCR p15, 0, R11, c7, c6, 2 \n" // DCISW. Invalidate by Set/Way
" B cont \n"
"Dccsw: CMP R0, #1 \n"
" BNE Dccisw \n"
" MCR p15, 0, R11, c7, c10, 2 \n" // DCCSW. Clean by Set/Way
" B cont \n"
"Dccisw: MCR p15, 0, R11, c7, c14, 2 \n" // DCCISW, Clean and Invalidate by Set/Way
"cont: SUBS R9, R9, #1 \n" // Decrement the Way number
" BGE Loop3 \n"
" SUBS R7, R7, #1 \n" // Decrement the Set number
" BGE Loop2 \n"
"Skip: ADD R10, R10, #2 \n" // increment the cache number
" CMP R3, R10 \n"
" BGT Loop1 \n"
"Finished: \n"
" DSB \n"
" POP {R4-R11} \n"
" BX lr \n" );
}
/** \brief Invalidate the whole D$
DCISW. Invalidate by Set/Way
*/
// from system_Renesas_RZ_A1.c
__STATIC_INLINE void __v7_inv_dcache_all(void) {
__v7_all_cache(0);
}
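/* Hedged sketch (not in the original patch): per the op dispatch in
 * __v7_all_cache() above (0 = DCISW invalidate, 1 = DCCSW clean, otherwise
 * DCCISW clean and invalidate), the matching clean wrappers would be: */
__STATIC_INLINE void __v7_clean_dcache_all(void) {
    __v7_all_cache(1);
}
__STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
    __v7_all_cache(2);
}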
#include "core_ca_mmu.h"
#elif (defined (__GNUC__)) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */


@ -60,7 +60,7 @@
#define PHY_READ (2)
#define MDC_WAIT (6) /* 400ns/4 */
#define BASIC_STS_MSK_LINK (0x0004) /* Link Status */
#define BASIC_STS_MSK_AUTO_CMP (0x0010) /* Auto-Negotiate Complete */
#define BASIC_STS_MSK_AUTO_CMP (0x0020) /* Auto-Negotiate Complete */
#define M_PHY_ID (0xFFFFFFF0)
#define PHY_ID_LAN8710A (0x0007C0F0)
/* ETHERPIR0 */
@ -106,11 +106,20 @@ typedef struct tag_edmac_recv_desc {
/* memory */
/* The whole transmit/receive descriptors (must be allocated in 16-byte boundaries) */
/* Transmit/receive buffers (must be allocated in 16-byte boundaries) */
static uint8_t ehernet_nc_memory[(sizeof(edmac_send_desc_t) * NUM_OF_TX_DESCRIPTOR) +
#if defined(__ICCARM__)
#pragma data_alignment=16
static uint8_t ethernet_nc_memory[(sizeof(edmac_send_desc_t) * NUM_OF_TX_DESCRIPTOR) +
(sizeof(edmac_recv_desc_t) * NUM_OF_RX_DESCRIPTOR) +
(NUM_OF_TX_DESCRIPTOR * SIZE_OF_BUFFER) +
(NUM_OF_RX_DESCRIPTOR * SIZE_OF_BUFFER)] //16 bytes aligned!
@ ".mirrorram";
#else
static uint8_t ethernet_nc_memory[(sizeof(edmac_send_desc_t) * NUM_OF_TX_DESCRIPTOR) +
(sizeof(edmac_recv_desc_t) * NUM_OF_RX_DESCRIPTOR) +
(NUM_OF_TX_DESCRIPTOR * SIZE_OF_BUFFER) +
(NUM_OF_RX_DESCRIPTOR * SIZE_OF_BUFFER)]
__attribute((section("NC_BSS"),aligned(16))); //16 bytes aligned!
#endif
static int32_t rx_read_offset; /* read offset */
static int32_t tx_wite_offset; /* write offset */
static uint32_t send_top_index;
@ -208,7 +217,7 @@ int ethernetext_init(ethernet_cfg_t *p_ethcfg) {
if (p_ethcfg->ether_mac != NULL) {
(void)memcpy(mac_addr, p_ethcfg->ether_mac, sizeof(mac_addr));
} else {
ethernet_address(mac_addr); /* Get MAC Address */
ethernet_address(mac_addr); /* Get MAC Address */
}
return 0;
@ -401,7 +410,7 @@ int ethernet_read(char *data, int dlen) {
void ethernet_address(char *mac) {
if (mac != NULL) {
mbed_mac_address(mac); /* Get MAC Address */
mbed_mac_address(mac); /* Get MAC Address */
}
}
@ -427,8 +436,8 @@ void ethernet_set_link(int speed, int duplex) {
if ((speed < 0) || (speed > 1)) {
data = 0x1000; /* Auto-Negotiation Enable */
phy_reg_write(BASIC_MODE_CONTROL_REG, data);
data = phy_reg_read(BASIC_MODE_STATUS_REG);
for (i = 0; i < 1000; i++) {
data = phy_reg_read(BASIC_MODE_STATUS_REG);
if (((uint32_t)data & BASIC_STS_MSK_AUTO_CMP) != 0) {
break;
}
@ -486,8 +495,8 @@ static void lan_desc_create(void) {
int32_t i;
uint8_t *p_memory_top;
(void)memset((void *)ehernet_nc_memory, 0, sizeof(ehernet_nc_memory));
p_memory_top = ehernet_nc_memory;
(void)memset((void *)ethernet_nc_memory, 0, sizeof(ethernet_nc_memory));
p_memory_top = ethernet_nc_memory;
/* Descriptor area configuration */
p_eth_desc_dsend = (edmac_send_desc_t *)p_memory_top;


@ -512,7 +512,11 @@ int serial_getc(serial_t *obj) {
int data;
int was_masked;
#if defined ( __ICCARM__ )
was_masked = __disable_irq_iar();
#else
was_masked = __disable_irq();
#endif /* __ICCARM__ */
if (obj->uart->SCFSR & 0x93) {
err_read = obj->uart->SCFSR;
obj->uart->SCFSR = (err_read & ~0x93);
@ -529,7 +533,11 @@ int serial_getc(serial_t *obj) {
while (!serial_readable(obj));
data = obj->uart->SCFRDR & 0xff;
#if defined ( __ICCARM__ )
was_masked = __disable_irq_iar();
#else
was_masked = __disable_irq();
#endif /* __ICCARM__ */
err_read = obj->uart->SCFSR;
obj->uart->SCFSR = (err_read & 0xfffD); // Clear RDF
if (!was_masked) {
@ -546,14 +554,22 @@ void serial_putc(serial_t *obj, int c) {
uint16_t dummy_read;
int was_masked;
#if defined ( __ICCARM__ )
was_masked = __disable_irq_iar();
#else
was_masked = __disable_irq();
#endif /* __ICCARM__ */
obj->uart->SCSCR |= 0x0080; // Set TIE
if (!was_masked) {
__enable_irq();
}
while (!serial_writable(obj));
obj->uart->SCFTDR = c;
#if defined ( __ICCARM__ )
was_masked = __disable_irq_iar();
#else
was_masked = __disable_irq();
#endif /* __ICCARM__ */
dummy_read = obj->uart->SCFSR;
obj->uart->SCFSR = (dummy_read & 0xff9f); // Clear TEND/TDFE
if (!was_masked) {
@ -572,7 +588,11 @@ int serial_writable(serial_t *obj) {
void serial_clear(serial_t *obj) {
int was_masked;
#if defined ( __ICCARM__ )
was_masked = __disable_irq_iar();
#else
was_masked = __disable_irq();
#endif /* __ICCARM__ */
obj->uart->SCFCR |= 0x06; // TFRST = 1, RFRST = 1
obj->uart->SCFCR &= ~0x06; // TFRST = 0, RFRST = 0
@ -589,7 +609,11 @@ void serial_pinout_tx(PinName tx) {
void serial_break_set(serial_t *obj) {
int was_masked;
#if defined ( __ICCARM__ )
was_masked = __disable_irq_iar();
#else
was_masked = __disable_irq();
#endif /* __ICCARM__ */
// TxD Output(L)
obj->uart->SCSPTR &= ~0x0001u; // SPB2DT = 0
obj->uart->SCSCR &= ~0x0020u; // TE = 0 (Output disable)
@ -600,7 +624,11 @@ void serial_break_set(serial_t *obj) {
void serial_break_clear(serial_t *obj) {
int was_masked;
#if defined ( __ICCARM__ )
was_masked = __disable_irq_iar();
#else
was_masked = __disable_irq();
#endif /* __ICCARM__ */
obj->uart->SCSCR |= 0x0020u; // TE = 1 (Output enable)
obj->uart->SCSPTR |= 0x0001u; // SPB2DT = 1
if (!was_masked) {
@ -615,7 +643,11 @@ void serial_set_flow_control(serial_t *obj, FlowControl type, PinName rxflow, Pi
serial_flow_irq_set(obj, 0);
if (type == FlowControlRTSCTS) {
#if defined ( __ICCARM__ )
was_masked = __disable_irq_iar();
#else
was_masked = __disable_irq();
#endif /* __ICCARM__ */
obj->uart->SCFCR = 0x0008u; // CTS/RTS enable
if (!was_masked) {
__enable_irq();
@ -623,7 +655,11 @@ void serial_set_flow_control(serial_t *obj, FlowControl type, PinName rxflow, Pi
pinmap_pinout(rxflow, PinMap_UART_RTS);
pinmap_pinout(txflow, PinMap_UART_CTS);
} else {
#if defined ( __ICCARM__ )
was_masked = __disable_irq_iar();
#else
was_masked = __disable_irq();
#endif /* __ICCARM__ */
obj->uart->SCFCR = 0x0000u; // CTS/RTS disable
if (!was_masked) {
__enable_irq();


@ -85,7 +85,11 @@ uint32_t us_ticker_read() {
uint64_t us_val64;
int check_irq_masked;
#if defined ( __ICCARM__)
check_irq_masked = __disable_irq_iar();
#else
check_irq_masked = __disable_irq();
#endif /* __ICCARM__ */
cnt_val64 = ticker_read_counter64();
us_val64 = (cnt_val64 / count_clock);


@ -76,6 +76,62 @@ extern OS_RESULT rt_mut_wait (OS_ID mutex, uint16_t timeout);
OS_RESULT _os_mut_release (uint32_t p, OS_ID mutex) __svc_indirect(0);
OS_RESULT _os_mut_wait (uint32_t p, OS_ID mutex, uint16_t timeout) __svc_indirect(0);
#elif defined (__ICCARM__)
typedef void *OS_ID;
typedef uint32_t OS_TID;
typedef uint32_t OS_MUT[4];
typedef uint32_t OS_RESULT;
#define runtask_id() rt_tsk_self()
#define mutex_init(m) rt_mut_init(m)
#define mutex_del(m) os_mut_delete(m)
#define mutex_wait(m) os_mut_wait(m,0xFFFF)
#define mutex_rel(m) os_mut_release(m)
extern OS_TID rt_tsk_self (void);
extern void rt_mut_init (OS_ID mutex);
extern OS_RESULT rt_mut_delete (OS_ID mutex);
extern OS_RESULT rt_mut_release (OS_ID mutex);
extern OS_RESULT rt_mut_wait (OS_ID mutex, uint16_t timeout);
#pragma swi_number=0
__swi OS_RESULT _os_mut_delete (OS_ID mutex);
static inline OS_RESULT os_mut_delete(OS_ID mutex)
{
__asm("mov r12,%0\n" :: "r"(&rt_mut_delete) : "r12" );
return _os_mut_delete(mutex);
}
#pragma swi_number=0
__swi OS_RESULT _os_mut_release (OS_ID mutex);
static inline OS_RESULT os_mut_release(OS_ID mutex)
{
__asm("mov r12,%0\n" :: "r"(&rt_mut_release) : "r12" );
return _os_mut_release(mutex);
}
#pragma swi_number=0
__swi OS_RESULT _os_mut_wait (OS_ID mutex, uint16_t timeout);
static inline OS_RESULT os_mut_wait(OS_ID mutex, uint16_t timeout)
{
__asm("mov r12,%0\n" :: "r"(&rt_mut_wait) : "r12" );
return _os_mut_wait(mutex, timeout);
}
#include <yvals.h> /* for include DLib_Thread.h */
void __iar_system_Mtxinit(__iar_Rmtx *);
void __iar_system_Mtxdst(__iar_Rmtx *);
void __iar_system_Mtxlock(__iar_Rmtx *);
void __iar_system_Mtxunlock(__iar_Rmtx *);
#endif
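
The three wrappers above use RTX's indirect SVC convention: the kernel routine's address is loaded into R12 and SVC 0 is issued, which SWI_Handler (added in HAL_CA9_asm.S later in this patch) dispatches via BLX R12 in SVC mode. A hedged sketch of the same pattern for a hypothetical kernel call rt_example:

#pragma swi_number=0
__swi OS_RESULT _os_example (OS_ID arg);            /* hypothetical wrapper */
static inline OS_RESULT os_example(OS_ID arg)
{
    extern OS_RESULT rt_example (OS_ID arg);        /* hypothetical kernel routine */
    __asm("mov r12,%0\n" :: "r"(&rt_example) : "r12" );
    return _os_example(arg);
}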
@ -174,6 +230,14 @@ uint16_t const mp_tmr_size = 0;
static OS_MUT std_libmutex[OS_MUTEXCNT];
static uint32_t nr_mutex;
extern void *__libspace_start;
#elif defined (__ICCARM__)
typedef struct os_mut_array {
OS_MUT mutex;
uint32_t used;
} os_mut_array_t;
static os_mut_array_t std_libmutex[OS_MUTEXCNT]; /* must be zero-initialized */
static uint32_t nr_mutex = 0;
#endif
@ -247,6 +311,82 @@ __attribute__((used)) void _mutex_release (OS_ID *mutex) {
}
}
#elif defined (__ICCARM__)
/*--------------------------- __iar_system_Mtxinit --------------------------*/
void __iar_system_Mtxinit(__iar_Rmtx *mutex)
{
/* Allocate and initialize a system mutex. */
int32_t idx;
for (idx = 0; idx < OS_MUTEXCNT; idx++)
{
if (std_libmutex[idx].used == 0)
{
std_libmutex[idx].used = 1;
*mutex = &std_libmutex[idx].mutex;
nr_mutex++;
break;
}
}
if (nr_mutex >= OS_MUTEXCNT)
{
/* If you are here, you need to increase OS_MUTEXCNT. */
for (;;);
}
mutex_init (*mutex);
}
/*--------------------------- __iar_system_Mtxdst ---------------------------*/
void __iar_system_Mtxdst(__iar_Rmtx *mutex)
{
/* Free a system mutex. */
int32_t idx;
if (nr_mutex == 0)
{
for (;;);
}
idx = ((((uint32_t)mutex) - ((uint32_t)&std_libmutex[0].mutex))
/ sizeof(os_mut_array_t));
if (idx >= OS_MUTEXCNT)
{
for (;;);
}
mutex_del (*mutex);
std_libmutex[idx].used = 0;
}
/*--------------------------- __iar_system_Mtxlock --------------------------*/
void __iar_system_Mtxlock(__iar_Rmtx *mutex)
{
/* Acquire a system mutex, lock stdlib resources. */
if (runtask_id ())
{
/* RTX running, acquire a mutex. */
mutex_wait (*mutex);
}
}
/*--------------------------- __iar_system_Mtxunlock ------------------------*/
void __iar_system_Mtxunlock(__iar_Rmtx *mutex)
{
/* Release a system mutex, unlock stdlib resources. */
if (runtask_id ())
{
/* RTX running, release a mutex. */
mutex_rel (*mutex);
}
}
#endif
@ -386,16 +526,11 @@ __attribute__((naked)) void software_init_hook (void) {
#elif defined (__ICCARM__)
extern int __low_level_init(void);
extern void __iar_data_init3(void);
extern void exit(int arg);
__noreturn __stackless void __cmain(void) {
void mbed_main(void) {
int a;
if (__low_level_init() != 0) {
__iar_data_init3();
}
osKernelInitialize();
osThreadCreate(&os_thread_def_main, NULL);
a = osKernelStart();


@ -0,0 +1,46 @@
/*----------------------------------------------------------------------------
* RL-ARM - RTX
*----------------------------------------------------------------------------
* Name: HAL_CA9.c
* Purpose: Hardware Abstraction Layer for Cortex-A9
* Rev.: 23 March 2015
*----------------------------------------------------------------------------
*
* Copyright (c) 2012 - 2015 ARM Limited
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*---------------------------------------------------------------------------*/
//unsigned char seen_id0_active = 0; // single byte to hold a flag used in the workaround for GIC errata 733075
/*----------------------------------------------------------------------------
* Functions
*---------------------------------------------------------------------------*/
/* Functions moved to HAL_CA9_asm.S */
/*----------------------------------------------------------------------------
* end of file
*---------------------------------------------------------------------------*/


@ -0,0 +1,480 @@
/*----------------------------------------------------------------------------
* RL-ARM - RTX
*----------------------------------------------------------------------------
* Name: HAL_CA9.c
* Purpose: Hardware Abstraction Layer for Cortex-A9
* Rev.: 8 April 2015
*----------------------------------------------------------------------------
*
* Copyright (c) 2012 - 2015 ARM Limited
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*---------------------------------------------------------------------------*/
PUBLIC rt_set_PSP
PUBLIC rt_get_PSP
PUBLIC _alloc_box
PUBLIC _free_box
PUBLIC SWI_Handler
PUBLIC PendSV_Handler
PUBLIC OS_Tick_Handler
/* macro defines form rt_HAL_CA.h */
#define CPSR_T_BIT 0x20
#define CPSR_I_BIT 0x80
#define CPSR_F_BIT 0x40
#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_ABT 0x17
#define MODE_UND 0x1B
#define MODE_SYS 0x1F
/* macro defines form rt_TypeDef.h */
#define TCB_TID 3 /* 'task id' offset */
#define TCB_STACKF 37 /* 'stack_frame' offset */
#ifndef __LARGE_PRIV_STACK
#define TCB_TSTACK 40 /* 'tsk_stack' offset */
#else
#define TCB_TSTACK 44 /* 'tsk_stack' offset for LARGE_STACK */
#endif
IMPORT rt_alloc_box
IMPORT rt_free_box
IMPORT os_tsk
IMPORT GICInterface_BASE
IMPORT rt_pop_req
IMPORT os_tick_irqack
IMPORT rt_systick
SECTION `.text`:CODE:ROOT(2)
/*----------------------------------------------------------------------------
* Functions
*---------------------------------------------------------------------------*/
//For A-class, set USR/SYS stack
//__asm void rt_set_PSP (U32 stack) {
rt_set_PSP:
ARM
MRS R1, CPSR
CPS #MODE_SYS ;no effect in USR mode
ISB
MOV SP, R0
MSR CPSR_c, R1 ;no effect in USR mode
ISB
BX LR
//}
//For A-class, get USR/SYS stack
//__asm U32 rt_get_PSP (void) {
rt_get_PSP:
ARM
MRS R1, CPSR
CPS #MODE_SYS ;no effect in USR mode
ISB
MOV R0, SP
MSR CPSR_c, R1 ;no effect in USR mode
ISB
BX LR
//}
/*--------------------------- _alloc_box ------------------------------------*/
//__asm void *_alloc_box (void *box_mem) {
_alloc_box:
/* Function wrapper for Unprivileged/Privileged mode. */
ARM
LDR R12,=(rt_alloc_box)
MRS R2, CPSR
LSLS R2, R2,#28
BXNE R12
SVC 0
BX LR
//}
/*--------------------------- _free_box -------------------------------------*/
//__asm int _free_box (void *box_mem, void *box) {
_free_box:
/* Function wrapper for Unprivileged/Privileged mode. */
LDR R12,=(rt_free_box)
MRS R2, CPSR
LSLS R2, R2,#28
BXNE R12
SVC 0
BX LR
//}
/*-------------------------- SWI_Handler -----------------------------------*/
//#pragma push
//#pragma arm
//__asm void SWI_Handler (void) {
SWI_Handler:
PRESERVE8
ARM
IMPORT rt_tsk_lock
IMPORT rt_tsk_unlock
IMPORT SVC_Count
IMPORT SVC_Table
IMPORT rt_stk_check
IMPORT FPUEnable
IMPORT scheduler_suspended ; flag set by rt_suspend, cleared by rt_resume, read by SWI_Handler
Mode_SVC EQU 0x13
SRSDB #Mode_SVC! ; Push LR_SVC and SPSR_SVC onto SVC mode stack
STR R4,[SP,#-0x4]! ; Push R4 so we can use it as a temp
MRS R4,SPSR ; Get SPSR
TST R4,#CPSR_T_BIT ; Check Thumb Bit
LDRNEH R4,[LR,#-2] ; Thumb: Load Halfword
BICNE R4,R4,#0xFF00 ; Extract SVC Number
LDREQ R4,[LR,#-4] ; ARM: Load Word
BICEQ R4,R4,#0xFF000000 ; Extract SVC Number
/* Lock out systick and re-enable interrupts */
STMDB SP!,{R0-R3,R12,LR}
AND R12, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R12 ; Adjust stack
STMDB SP!,{R12, LR} ; Store stack adjustment and dummy LR to SVC stack
BLX rt_tsk_lock
CPSIE i
LDMIA SP!,{R12,LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack
LDMIA SP!,{R0-R3,R12,LR}
CMP R4,#0
BNE SVC_User
MRS R4,SPSR
STR R4,[SP,#-0x4]! ; Push R4 so we can use it as a temp
AND R4, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R4 ; Adjust stack
STMDB SP!,{R4, LR} ; Store stack adjustment and dummy LR
BLX R12
LDMIA SP!,{R4, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R4 ; Unadjust stack
LDR R4,[SP],#0x4 ; Restore R4
MSR SPSR_CXSF,R4
/* Here we will be in SVC mode (even if coming in from PendSV_Handler or OS_Tick_Handler) */
Sys_Switch:
LDR LR,=(os_tsk)
LDMIA LR,{R4,LR} ; os_tsk.run, os_tsk.new
CMP R4,LR
BNE switching
STMDB SP!,{R0-R3,R12,LR}
AND R12, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R12 ; Adjust stack
STMDB SP!,{R12,LR} ; Store stack adjustment and dummy LR to SVC stack
CPSID i
; Do not unlock scheduler if it has just been suspended by rt_suspend()
LDR R1,=scheduler_suspended
LDRB R0, [R1]
CMP R0, #1
BEQ dont_unlock
BLX rt_tsk_unlock
dont_unlock:
LDMIA SP!,{R12,LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack
LDMIA SP!,{R0-R3,R12,LR}
LDR R4,[SP],#0x4
RFEFD SP! ; Return from exception, no task switch
switching:
CLREX
CMP R4,#0
ADDEQ SP,SP,#12 ; Original R4, LR & SPSR do not need to be popped when we are paging in a different task
BEQ SVC_Next ; Runtask deleted?
STMDB SP!,{R8-R11} //R4 and LR already stacked
MOV R10,R4 ; Preserve os_tsk.run
MOV R11,LR ; Preserve os_tsk.new
ADD R8,SP,#16 ; Unstack R4,LR
LDMIA R8,{R4,LR}
SUB SP,SP,#4 ; Make space on the stack for the next instn
STMIA SP,{SP}^ ; Put User SP onto stack
LDR R8,[SP],#0x4 ; Pop User SP into R8
MRS R9,SPSR
STMDB R8!,{R9} ; User CPSR
STMDB R8!,{LR} ; User PC
STMDB R8,{LR}^ ; User LR
SUB R8,R8,#4 ; No writeback for store of User LR
STMDB R8!,{R0-R3,R12} ; User R0-R3,R12
MOV R3,R10 ; os_tsk.run
MOV LR,R11 ; os_tsk.new
LDMIA SP!,{R9-R12}
ADD SP,SP,#12 ; Fix up SP for unstack of R4, LR & SPSR
STMDB R8!,{R4-R7,R9-R12} ; User R4-R11
//If applicable, stack VFP/NEON state
MRC p15,0,R1,c1,c0,2 ; VFP/NEON access enabled? (CPACR)
AND R2,R1,#0x00F00000
CMP R2,#0x00F00000
BNE no_outgoing_vfp
VMRS R2,FPSCR
STMDB R8!,{R2,R4} ; Push FPSCR, maintain 8-byte alignment
//IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
VSTMDB R8!,{D0-D15}
VSTMDB R8!,{D16-D31}
LDRB R2,[R3,#TCB_STACKF] ; Record in TCB that NEON/D32 state is stacked
ORR R2,R2,#4
STRB R2,[R3,#TCB_STACKF]
//ENDIF
no_outgoing_vfp:
STR R8,[R3,#TCB_TSTACK]
MOV R4,LR
STR R4,[SP,#-0x4]! ; Push R4 so we can use it as a temp
AND R4, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R4 ; Adjust stack
STMDB SP!,{R4, LR} ; Store stack adjustment and dummy LR to SVC stack
BLX rt_stk_check
LDMIA SP!,{R4, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R4 ; Unadjust stack
LDR R4,[SP],#0x4 ; Restore R4
MOV LR,R4
SVC_Next: //R4 == os_tsk.run, LR == os_tsk.new, R0-R3, R5-R12 corruptible
LDR R1,=(os_tsk) ; os_tsk.run = os_tsk.new
STR LR,[R1]
LDRB R1,[LR,#TCB_TID] ; os_tsk.run->task_id
LSL R1,R1,#8 ; Store PROCID
MCR p15,0,R1,c13,c0,1 ; Write CONTEXTIDR
LDR R0,[LR,#TCB_TSTACK] ; os_tsk.run->tsk_stack
//Does incoming task have VFP/NEON state in stack?
LDRB R3,[LR,#TCB_STACKF]
ANDS R3, R3, #0x6
MRC p15,0,R1,c1,c0,2 ; Read CPACR
BICEQ R1,R1,#0x00F00000 ; Disable VFP/NEON access if incoming task does not have stacked VFP/NEON state
ORRNE R1,R1,#0x00F00000 ; Enable VFP/NEON access if incoming task does have stacked VFP/NEON state
MCR p15,0,R1,c1,c0,2 ; Write CPACR
BEQ no_incoming_vfp
ISB ; We only need the sync if we enabled, otherwise we will context switch before next VFP/NEON instruction anyway
//IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
VLDMIA R0!,{D16-D31}
//ENDIF
VLDMIA R0!,{D0-D15}
LDR R2,[R0]
VMSR FPSCR,R2
ADD R0,R0,#8
no_incoming_vfp:
LDR R1,[R0,#60] ; Restore User CPSR
MSR SPSR_CXSF,R1
LDMIA R0!,{R4-R11} ; Restore User R4-R11
ADD R0,R0,#4 ; Restore User R1-R3,R12
LDMIA R0!,{R1-R3,R12}
LDMIA R0,{LR}^ ; Restore User LR
ADD R0,R0,#4 ; No writeback for load to user LR
LDMIA R0!,{LR} ; Restore User PC
ADD R0,R0,#4 ; Correct User SP for unstacked user CPSR
STR R0,[SP,#-0x4]! ; Push R0 onto stack
LDMIA SP,{SP}^ ; Get R0 off stack into User SP
ADD SP,SP,#4 ; Put SP back
LDR R0,[R0,#-32] ; Restore R0
STMDB SP!,{R0-R3,R12,LR}
AND R12, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R12 ; Adjust stack
STMDB sp!,{R12, LR} ; Store stack adjustment and dummy LR to SVC stack
CPSID i
BLX rt_tsk_unlock
LDMIA sp!,{R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack
LDMIA SP!,{R0-R3,R12,LR}
MOVS PC,LR ; Return from exception
/*------------------- User SVC -------------------------------*/
SVC_User:
LDR R12,=SVC_Count
LDR R12,[R12]
CMP R4,R12 ; Check for overflow
BHI SVC_Done
LDR R12,=SVC_Table-4
LDR R12,[R12,R4,LSL #2] ; Load SVC Function Address
MRS R4,SPSR ; Save SPSR
STR R4,[SP,#-0x4]! ; Push R4 so we can use it as a temp
AND R4, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R4 ; Adjust stack
STMDB SP!,{R4, LR} ; Store stack adjustment and dummy LR
BLX R12 ; Call SVC Function
LDMIA SP!,{R4, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R4 ; Unadjust stack
LDR R4,[SP],#0x4 ; Restore R4
MSR SPSR_CXSF,R4 ; Restore SPSR
SVC_Done:
STMDB sp!,{R0-R3,R12,LR}
STR R4,[sp,#-0x4]! ; Push R4 so we can use it as a temp
AND R4, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R4 ; Adjust stack
STMDB SP!,{R4, LR} ; Store stack adjustment and dummy LR
CPSID i
BLX rt_tsk_unlock
LDMIA SP!,{R4, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R4 ; Unadjust stack
LDR R4,[SP],#0x4 ; Restore R4
LDMIA SP!,{R0-R3,R12,LR}
LDR R4,[SP],#0x4
RFEFD SP! ; Return from exception
//}
//#pragma pop
//#pragma push
//#pragma arm
//__asm void PendSV_Handler (U32 IRQn) {
PendSV_Handler:
ARM
IMPORT rt_tsk_lock
IMPORT IRQNestLevel ; Nesting counter: indicates whether we are inside an ISR and how deeply nested; 0 = not in ISR.
IMPORT seen_id0_active ; Flag used to workaround GIC 390 errata 733075 - set in startup_Renesas_RZ_A1.s
ADD SP,SP,#8 //fix up stack pointer (R0 has been pushed and will never be popped, R1 was pushed for stack alignment)
//Disable systick interrupts, then write EOIR. We want interrupts disabled before we enter the context switcher.
STMDB SP!,{R0, R1}
BLX rt_tsk_lock
LDMIA SP!,{R0, R1}
LDR R1,=(GICInterface_BASE)
LDR R1, [R1, #0]
STR R0, [R1, #0x10]
; If it was interrupt ID0, clear the seen flag, otherwise return as normal
CMP R0, #0
LDREQ R1, =seen_id0_active
STRBEQ R0, [R1] ; Clear the seen flag, using R0 (which is 0), to save loading another register
LDR R0, =IRQNestLevel ; Get address of nesting counter
LDR R1, [R0]
SUB R1, R1, #1 ; Decrement nesting counter
STR R1, [R0]
BLX (rt_pop_req)
LDMIA SP!,{R1, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R1 ; Unadjust stack
LDR R0,[SP,#24]
MSR SPSR_CXSF,R0
LDMIA SP!,{R0-R3,R12} ; Leave SPSR & LR on the stack
STR R4,[SP,#-0x4]!
B Sys_Switch
//}
//#pragma pop
//#pragma push
//#pragma arm
//__asm void OS_Tick_Handler (U32 IRQn) {
OS_Tick_Handler:
ARM
IMPORT rt_tsk_lock
IMPORT IRQNestLevel ; Nesting counter: indicates whether we are inside an ISR and how deeply nested; 0 = not in ISR.
IMPORT seen_id0_active ; Flag used to workaround GIC 390 errata 733075 - set in startup_Renesas_RZ_A1.s
ADD SP,SP,#8 //fix up stack pointer (R0 has been pushed and will never be popped, R1 was pushed for stack alignment)
STMDB SP!,{R0, R1}
BLX rt_tsk_lock
LDMIA SP!,{R0, R1}
LDR R1, =(GICInterface_BASE)
LDR R1, [R1, #0]
STR R0, [R1, #0x10]
; If it was interrupt ID0, clear the seen flag, otherwise return as normal
CMP R0, #0
LDREQ R1, =seen_id0_active
STRBEQ R0, [R1] ; Clear the seen flag, using R0 (which is 0), to save loading another register
LDR R0, =IRQNestLevel ; Get address of nesting counter
LDR R1, [R0]
SUB R1, R1, #1 ; Decrement nesting counter
STR R1, [R0]
BLX (os_tick_irqack)
BLX (rt_systick)
LDMIA SP!,{R1, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R1 ; Unadjust stack
LDR R0,[SP,#24]
MSR SPSR_CXSF,R0
LDMIA SP!,{R0-R3,R12} ; Leave SPSR & LR on the stack
STR R4,[SP,#-0x4]!
B Sys_Switch
//}
//#pragma pop
END
/*----------------------------------------------------------------------------
* end of file
*---------------------------------------------------------------------------*/
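
The context switcher above decides whether to stack VFP/NEON registers by checking the cp10/cp11 access bits in CPACR, and records what it stacked in the TCB's stack_frame byte (TCB_STACKF). The C sketch below mirrors that bookkeeping for the outgoing task; the helper and type names (save_fp_context, tcb_t) are illustrative stand-ins, not part of the port.

/* Sketch only: models the CPACR check and TCB_STACKF update performed in the
 * assembly above; the actual register pushes are done there with VSTMDB. */
#include <stdint.h>

#define CPACR_CP10_CP11_FULL  0x00F00000u   /* full access to cp10/cp11      */
#define STACKF_NEON_D32       0x4u          /* "NEON/D32 state stacked" bit  */

typedef struct {
    uint8_t  stack_frame;                   /* corresponds to TCB_STACKF     */
    uint32_t tsk_stack;                     /* corresponds to TCB_TSTACK     */
} tcb_t;

static void save_fp_context(tcb_t *outgoing, uint32_t cpacr)
{
    if ((cpacr & CPACR_CP10_CP11_FULL) == CPACR_CP10_CP11_FULL) {
        /* VFP/NEON access is enabled, so the outgoing task may own FP state:
         * push FPSCR and D0-D31 onto its stack, then note that in the TCB.  */
        outgoing->stack_frame |= STACKF_NEON_D32;
    }
}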

View File

@ -0,0 +1,57 @@
;/*----------------------------------------------------------------------------
; * RL-ARM - RTX
; *----------------------------------------------------------------------------
; * Name: SVC_TABLE.S
; * Purpose: Pre-defined SVC Table for Cortex-M
; * Rev.: V4.70
; *----------------------------------------------------------------------------
; *
; * Copyright (c) 1999-2009 KEIL, 2009-2013 ARM Germany GmbH
; * All rights reserved.
; * Redistribution and use in source and binary forms, with or without
; * modification, are permitted provided that the following conditions are met:
; * - Redistributions of source code must retain the above copyright
; * notice, this list of conditions and the following disclaimer.
; * - Redistributions in binary form must reproduce the above copyright
; * notice, this list of conditions and the following disclaimer in the
; * documentation and/or other materials provided with the distribution.
; * - Neither the name of ARM nor the names of its contributors may be used
; * to endorse or promote products derived from this software without
; * specific prior written permission.
; *
; * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
; * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
; * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
; * ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
; * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
; * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
; * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
; * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
; * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
; * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
; * POSSIBILITY OF SUCH DAMAGE.
; *---------------------------------------------------------------------------*/
SECTION SVC_TABLE:CODE:ROOT(2)
EXPORT SVC_Count
SVC_Cnt EQU (SVC_End-SVC_Table)/4
SVC_Count DCD SVC_Cnt
; Import user SVC functions here.
; IMPORT __SVC_1
EXPORT SVC_Table
SVC_Table
; Insert user SVC functions here. SVC 0 used by RTL Kernel.
; DCD __SVC_1 ; InitMemorySubsystem
SVC_End
END
/*----------------------------------------------------------------------------
* end of file
*---------------------------------------------------------------------------*/
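
SVC_User above checks the SVC number against SVC_Count and then indexes this table with the number minus one (hence the SVC_Table-4 base). A small, self-contained C model of that dispatch, using illustrative stand-ins for the assembly symbols:

/* Sketch: C model of the SVC_User dispatch; all names here are stand-ins. */
#include <stdint.h>

typedef void (*svc_fn)(void);

static void svc1_init_memory(void) { /* e.g. InitMemorySubsystem */ }

static const svc_fn   svc_table[] = { svc1_init_memory };                  /* SVC_Table */
static const uint32_t svc_count   = sizeof svc_table / sizeof svc_table[0]; /* SVC_Count */

/* svc_num carries the SVC number extracted from the SVC instruction (R4).
 * SVC 0 belongs to the kernel and never reaches this point. */
static void svc_user(uint32_t svc_num)
{
    if (svc_num == 0 || svc_num > svc_count)
        return;                       /* BHI SVC_Done                              */
    svc_table[svc_num - 1]();         /* LDR R12,=SVC_Table-4 ;
                                         LDR R12,[R12,R4,LSL #2] ; BLX R12          */
}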

View File

@ -143,7 +143,7 @@ used throughout the whole project.
#define CMSIS_OS_RTX_CA /* new define for Cortex-A */
// The stack space occupied mainly depends on the underlying C standard library
#if defined(TOOLCHAIN_GCC) || defined(TOOLCHAIN_ARM_STD)
#if defined(TOOLCHAIN_GCC) || defined(TOOLCHAIN_ARM_STD) || defined(TOOLCHAIN_IAR)
# define WORDS_STACK_SIZE 512
#elif defined(TOOLCHAIN_ARM_MICRO)
# define WORDS_STACK_SIZE 128
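
With this change IAR builds get the same 512-word default thread stack as GCC and ARM standard-library builds. Assuming 4-byte words and the conventional derived macro name in this layer (an assumption, not shown in this hunk), that works out to 2 KB per default thread:

/* Sketch: how the word count typically becomes a byte size; the derived
 * macro name DEFAULT_STACK_SIZE is assumed, not shown in the diff above. */
#define WORDS_STACK_SIZE   512                        /* IAR / GCC / ARM_STD */
#define DEFAULT_STACK_SIZE (WORDS_STACK_SIZE * 4)     /* 2048 bytes          */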

View File

@ -75,8 +75,6 @@
#elif defined (__ICCARM__) /* IAR Compiler */
#error IAR Compiler support not implemented for Cortex-A
#endif
static U8 priority = 0xff;
@ -99,6 +97,15 @@ extern const U32 GICInterface_BASE;
#define SGI_PENDSV_BIT ((U32)(1 << (SGI_PENDSV & 0xf)))
//Raise the priority filter to prevent the timer and PendSV interrupts from being signalled. Guarantees that these interrupts will not be forwarded.
#if defined (__ICCARM__)
#define OS_LOCK() int irq_dis = __disable_irq_iar();\
priority = GICI_ICCPMR; \
GICI_ICCPMR = 0xff; \
GICI_ICCPMR = GICI_ICCPMR - 1; \
__DSB();\
if(!irq_dis) __enable_irq(); \
#else
#define OS_LOCK() int irq_dis = __disable_irq();\
priority = GICI_ICCPMR; \
GICI_ICCPMR = 0xff; \
@ -106,6 +113,8 @@ extern const U32 GICInterface_BASE;
__DSB();\
if(!irq_dis) __enable_irq(); \
#endif
//Restore priority filter. Re-enable timer and PendSV signaling
#define OS_UNLOCK() __DSB(); \
GICI_ICCPMR = priority; \
@ -133,10 +142,15 @@ extern const U32 GICInterface_BASE;
#ifdef __USE_EXCLUSIVE_ACCESS
#define rt_inc(p) while(__strex((__ldrex(p)+1),p))
#define rt_dec(p) while(__strex((__ldrex(p)-1),p))
#else
#if defined (__ICCARM__)
#define rt_inc(p) { int irq_dis = __disable_irq_iar();(*p)++;if(!irq_dis) __enable_irq(); }
#define rt_dec(p) { int irq_dis = __disable_irq_iar();(*p)--;if(!irq_dis) __enable_irq(); }
#else
#define rt_inc(p) { int irq_dis = __disable_irq();(*p)++;if(!irq_dis) __enable_irq(); }
#define rt_dec(p) { int irq_dis = __disable_irq();(*p)--;if(!irq_dis) __enable_irq(); }
#endif
#endif /* __ICCARM__ */
#endif /* __USE_EXCLUSIVE_ACCESS */
__inline static U32 rt_inc_qi (U32 size, U8 *count, U8 *first) {
U32 cnt,c2;
@ -152,7 +166,11 @@ __inline static U32 rt_inc_qi (U32 size, U8 *count, U8 *first) {
} while (__strex(c2, first));
#else
int irq_dis;
#if defined (__ICCARM__)
irq_dis = __disable_irq_iar();
#else
irq_dis = __disable_irq();
#endif /* __ICCARM__ */
if ((cnt = *count) < size) {
*count = cnt+1;
c2 = (cnt = *first) + 1;
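
Both the IAR variant of OS_LOCK and the rt_inc/rt_dec fallbacks rely on the IRQ-disable intrinsic returning the previous mask state, so interrupts are only re-enabled if they were enabled on entry. A stand-alone C sketch of that pattern, assuming (as the macros above imply) that __disable_irq_iar() returns non-zero when IRQs were already disabled:

/* Sketch of the interrupt-protected increment used when exclusive-access
 * (LDREX/STREX) support is not available. The externs are provided by the
 * port; the wrapper function itself is illustrative. */
#include <stdint.h>

extern int  __disable_irq_iar(void);   /* returns previous IRQ-disable state */
extern void __enable_irq(void);

static void rt_inc_sketch(volatile uint32_t *p)
{
    int irq_dis = __disable_irq_iar(); /* remember whether IRQs were masked  */
    (*p)++;                            /* critical section                   */
    if (!irq_dis)
        __enable_irq();                /* re-enable only if we masked them   */
}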

View File

@ -101,7 +101,12 @@ void *rt_alloc_box (void *box_mem) {
#ifndef __USE_EXCLUSIVE_ACCESS
int irq_dis;
#if defined (__ICCARM__)
irq_dis = __disable_irq_iar();
#else
irq_dis = __disable_irq ();
#endif /* __ICCARM__ */
free = ((P_BM) box_mem)->free;
if (free) {
((P_BM) box_mem)->free = *free;
@ -152,7 +157,11 @@ int rt_free_box (void *box_mem, void *box) {
}
#ifndef __USE_EXCLUSIVE_ACCESS
#if defined (__ICCARM__)
irq_dis = __disable_irq_iar();
#else
irq_dis = __disable_irq ();
#endif /* __ICCARM__ */
*((void **)box) = ((P_BM) box_mem)->free;
((P_BM) box_mem)->free = box;
if (!irq_dis) __enable_irq ();
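
The guarded section in rt_free_box simply pushes the block back onto the pool's singly linked free list, whose links live in the first word of each free block. A minimal C model of that push and of the matching pop in rt_alloc_box, with a simplified pool type standing in for the real P_BM descriptor:

/* Sketch of the box free-list manipulation shown above; box_pool_t is a
 * simplified stand-in for the real P_BM descriptor. */
typedef struct box_pool {
    void *free;                     /* head of free list; each free box keeps
                                       the next pointer in its first word    */
} box_pool_t;

static void *alloc_box_sketch(box_pool_t *pool)           /* rt_alloc_box core */
{
    void *box = pool->free;
    if (box)
        pool->free = *(void **)box;                        /* pop head          */
    return box;
}

static void free_box_sketch(box_pool_t *pool, void *box)  /* rt_free_box core  */
{
    *(void **)box = pool->free;                            /* link old head     */
    pool->free    = box;                                   /* push              */
}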

View File

@ -69,8 +69,17 @@ typedef struct OS_TCB {
/* Hardware dependent part: specific to the Cortex processor */
U8 stack_frame; /* Stack frame: 0x0 Basic, 0x1 Extended, 0x2 VFP/D16 stacked, 0x4 NEON/D32 stacked */
#if defined (__ICCARM__)
#ifndef __LARGE_PRIV_STACK
U16 priv_stack; /* Private stack size, 0= system assigned */
#else
U16 reserved; /* Reserved (padding) */
U32 priv_stack; /* Private stack size for LARGE_STACK, 0= system assigned */
#endif /* __LARGE_PRIV_STACK */
#else
U16 reserved; /* Reserved (padding) */
U32 priv_stack; /* Private stack size for LARGE_STACK, 0= system assigned */
#endif
U32 tsk_stack; /* Current task Stack pointer (R13) */
U32 *stack; /* Pointer to Task Stack memory block */
@ -79,7 +88,15 @@ typedef struct OS_TCB {
} *P_TCB;
#define TCB_TID 3 /* 'task id' offset */
#define TCB_STACKF 37 /* 'stack_frame' offset */
#if defined (__ICCARM__)
#ifndef __LARGE_PRIV_STACK
#define TCB_TSTACK 40 /* 'tsk_stack' offset */
#else
#define TCB_TSTACK 44 /* 'tsk_stack' offset for LARGE_STACK */
#endif /* __LARGE_PRIV_STACK */
#else
#define TCB_TSTACK 44 /* 'tsk_stack' offset for LARGE_STACK */
#endif
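
Because the assembly context switcher addresses the TCB through these hand-maintained byte offsets, they must track the struct layout exactly; the IAR-specific branch above exists precisely because the field sizes differ. A compile-time guard along these lines (a sketch assuming C11 _Static_assert, this header already included, and the usual RTX member name task_id for the 'task id' field) would catch a mismatch:

/* Sketch: compile-time check that the offsets used by the assembly still
 * match the OS_TCB layout. Assumes C11 and a complete struct OS_TCB. */
#include <stddef.h>

_Static_assert(offsetof(struct OS_TCB, task_id)     == TCB_TID,    "TCB_TID out of sync");
_Static_assert(offsetof(struct OS_TCB, stack_frame) == TCB_STACKF, "TCB_STACKF out of sync");
_Static_assert(offsetof(struct OS_TCB, tsk_stack)   == TCB_TSTACK, "TCB_TSTACK out of sync");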
typedef struct OS_PSFE { /* Post Service Fifo Entry */
void *id; /* Object Identification */

View File

@ -87,6 +87,7 @@ class IAREmbeddedWorkbench(Exporter):
'ARCH_BLE',
'MOTE_L152RC',
'EFM32PG_STK3401',
'RZ_A1H',
]
def generate(self):

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -268,6 +268,7 @@ if __name__ == '__main__':
('iar', 'MAXWSNENV'),
('iar', 'MAX32600MBED'),
('iar', 'MOTE_L152RC'),
('iar', 'RZ_A1H'),
# ('sw4stm32', 'DISCO_F051R8'),
# ('sw4stm32', 'DISCO_F100RB'),

View File

@ -1630,7 +1630,7 @@ class RZ_A1H(Target):
Target.__init__(self)
self.core = "Cortex-A9"
self.extra_labels = ['RENESAS', 'MBRZA1H']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.default_toolchain = "ARM"