Merge pull request #933 from masaohamanaka/master

RZ/A1H - Modify to support GCC and fix some driver bugs.
Martin Kojtal 2015-02-27 10:55:20 +01:00
commit 43d7f387ec
33 changed files with 2135 additions and 419 deletions


@@ -52,7 +52,7 @@ Typedef definitions
/*******************************************************************************
Macro definitions
*******************************************************************************/
-#define DUMMY_ACCESS (*(volatile unsigned long *)(OSTM0CNT))
+#define DUMMY_ACCESS OSTM0CNT
/* #define CACHE_WRITEBACK */


@@ -515,10 +515,10 @@ extern "C" {
if (RZA_IO_RegRead_16(
&g_usb0_function_pipecfg[pipe], USB_PIPECFG_DIR_SHIFT, USB_PIPECFG_DIR) == 0) {
/* read */
-__nop();
+__NOP();
} else {
/* write */
-__nop();
+__NOP();
}
}
}
@@ -629,7 +629,7 @@ extern "C" {
if (RZA_IO_RegRead_16(
&g_usb0_function_pipecfg[pipe], USB_PIPECFG_DIR_SHIFT, USB_PIPECFG_DIR) == 0) {
/* read */
-__nop();
+__NOP();
} else {
/* write */
EPx_read_status = DEVDRV_USBF_PIPE_WAIT;


@@ -56,7 +56,7 @@ Typedef definitions
/*******************************************************************************
Macro definitions
*******************************************************************************/
-#define DUMMY_ACCESS (*(volatile unsigned long *)(OSTM0CNT))
+#define DUMMY_ACCESS OSTM0CNT
/* #define CACHE_WRITEBACK */


@@ -237,6 +237,7 @@ void USBHALHost::_usbisr(void) {
void USBHALHost::UsbIrqhandler() {
uint32_t int_status = ohciwrapp_reg_r(OHCI_REG_INTERRUPTSTATUS) & ohciwrapp_reg_r(OHCI_REG_INTERRUPTENABLE);
uint32_t data;
if (int_status != 0) { //Is there something to actually process?
// Root hub status change interrupt
@@ -254,7 +255,8 @@ void USBHALHost::UsbIrqhandler() {
wait_ms(150);
//Hub 0 (root hub), Port 1 (count starts at 1), Low or High speed
-deviceConnected(0, 1, ohciwrapp_reg_r(OHCI_REG_RHPORTSTATUS1) & OR_RH_PORT_LSDA);
+data = ohciwrapp_reg_r(OHCI_REG_RHPORTSTATUS1) & OR_RH_PORT_LSDA;
deviceConnected(0, 1, data);
}
//Root device disconnected


@@ -459,6 +459,10 @@ extern "C" void __iar_argc_argv() {
// Linker defined symbol used by _sbrk to indicate where heap should start.
extern "C" int __end__;
#if defined(TARGET_CORTEX_A)
extern "C" uint32_t __HeapLimit;
#endif
// Turn off the errno macro and use actual global variable instead.
#undef errno
extern "C" int errno;
@@ -474,6 +478,8 @@ extern "C" caddr_t _sbrk(int incr) {
#if defined(TARGET_ARM7)
if (new_heap >= stack_ptr) {
#elif defined(TARGET_CORTEX_A)
if (new_heap >= (unsigned char*)&__HeapLimit) { /* __HeapLimit is end of heap section */
#else
if (new_heap >= (unsigned char*)__get_MSP()) {
#endif
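For context, the GCC _sbrk in mbed's retarget code ends up roughly as sketched below on Cortex-A: the heap still grows upward from __end__, but it is now bounded by the linker-provided __HeapLimit symbol instead of the current MSP. Only the #if ladder is verbatim from the diff; the surrounding body is a sketch paraphrased from the usual newlib retarget shape and may differ in detail from the actual source.

    /* Sketch only -- assumes the surrounding mbed retarget includes (errno.h, sys/types.h). */
    extern "C" caddr_t _sbrk(int incr) {
        static unsigned char *heap = (unsigned char *)&__end__;  /* heap starts after .bss */
        unsigned char *prev_heap = heap;
        unsigned char *new_heap  = heap + incr;

    #if defined(TARGET_ARM7)
        if (new_heap >= stack_ptr) {
    #elif defined(TARGET_CORTEX_A)
        if (new_heap >= (unsigned char *)&__HeapLimit) { /* __HeapLimit is end of heap section */
    #else
        if (new_heap >= (unsigned char *)__get_MSP()) {
    #endif
            errno = ENOMEM;              /* heap would run past its limit */
            return (caddr_t)-1;
        }

        heap = new_heap;
        return (caddr_t)prev_heap;
    }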


@@ -1,13 +1,14 @@
-/* Linker script for mbed LPC1768 */
+/* Linker script for mbed RZ_A1H */
/* Linker script to configure memory regions. */
MEMORY
{
-FLASH (rx) : ORIGIN = 0x20000000, LENGTH = 512K
+ROM (rx) : ORIGIN = 0x00000000, LENGTH = 0x02000000
-RAM (rwx) : ORIGIN = 0x20080000, LENGTH = (1M)
+BOOT_LOADER (rx) : ORIGIN = 0x18000000, LENGTH = 0x00004000
SFLASH (rx) : ORIGIN = 0x18004000, LENGTH = 0x07FFC000
-USB_RAM(rwx) : ORIGIN = 0x20180000, LENGTH = 16K
+L_TTB (rw) : ORIGIN = 0x20000000, LENGTH = 0x00004000
-ETH_RAM(rwx) : ORIGIN = 0x20280000, LENGTH = 16K
+RAM (rwx) : ORIGIN = 0x20020000, LENGTH = 0x00700000
RAM_NC (rwx) : ORIGIN = 0x20900000, LENGTH = 0x00100000
}
/* Linker script to place sections and symbol values. Should be used together
@@ -40,9 +41,21 @@ ENTRY(Reset_Handler)
SECTIONS
{
.boot :
{
KEEP(*(.boot_loader))
} > BOOT_LOADER
.text :
{
Image$$VECTORS$$Base = .;
* (RESET)
Image$$VECTORS$$Limit = .;
. += 0x00000400;
KEEP(*(.isr_vector))
*(SVC_TABLE)
*(.text*)
KEEP(*(.init))
@@ -62,31 +75,66 @@ SECTIONS
*(SORT(.dtors.*))
*(.dtors)
Image$$RO_DATA$$Base = .;
*(.rodata*)
Image$$RO_DATA$$Limit = .;
KEEP(*(.eh_frame*))
-} > FLASH
+} > SFLASH
.ARM.extab :
{
*(.ARM.extab* .gnu.linkonce.armextab.*)
-} > FLASH
+} > SFLASH
__exidx_start = .;
.ARM.exidx :
{
*(.ARM.exidx* .gnu.linkonce.armexidx.*)
-} > FLASH
+} > SFLASH
__exidx_end = .;
.copy.table :
{
. = ALIGN(4);
__copy_table_start__ = .;
LONG (__etext)
LONG (__data_start__)
LONG (__data_end__ - __data_start__)
LONG (__etext2)
LONG (__nc_data_start)
LONG (__nc_data_end - __nc_data_start)
__copy_table_end__ = .;
} > SFLASH
.zero.table :
{
. = ALIGN(4);
__zero_table_start__ = .;
LONG (__bss_start__)
LONG (__bss_end__ - __bss_start__)
LONG (__nc_bss_start)
LONG (__nc_bss_end - __nc_bss_start)
__zero_table_end__ = .;
} > SFLASH
__etext = .;
.ttb :
{
Image$$TTB$$ZI$$Base = .;
. += 0x00004000;
Image$$TTB$$ZI$$Limit = .;
} > L_TTB
.data : AT (__etext)
{
Image$$RW_DATA$$Base = .;
__data_start__ = .;
Image$$RW_RAM1$$Base = .;
*(vtable)
*(.data*)
Image$$RW_DATA$$Limit = .;
. = ALIGN(4);
/* preinit data */
@@ -116,13 +164,14 @@ SECTIONS
} > RAM
-.bss :
+.bss ALIGN(0x400):
{
Image$$ZI_DATA$$Base = .;
__bss_start__ = .;
*(.bss*)
*(COMMON)
__bss_end__ = .;
-Image$$RW_RAM1$$ZI$$Limit = . ;
+Image$$ZI_DATA$$Limit = .;
} > RAM
@@ -142,6 +191,29 @@ SECTIONS
*(.stack)
} > RAM
__etext2 = __etext + SIZEOF(.data);
.nc_data : AT (__etext2)
{
Image$$RW_DATA_NC$$Base = .;
__nc_data_start = .;
*(NC_DATA)
. = ALIGN(4);
__nc_data_end = .;
Image$$RW_DATA_NC$$Limit = .;
} > RAM_NC
.nc_bss (NOLOAD) :
{
Image$$ZI_DATA_NC$$Base = .;
__nc_bss_start = .;
*(NC_BSS)
. = ALIGN(4);
__nc_bss_end = .;
Image$$ZI_DATA_NC$$Limit = .;
} > RAM_NC
/* Set stack top to end of RAM, and stack limit move down by
* size of stack_dummy section */
__StackTop = ORIGIN(RAM) + LENGTH(RAM);
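The new RAM_NC region plus the NC_DATA/NC_BSS input sections give GCC builds an uncached memory area (the MMU table maps it with Sect_Normal_NC in the create_translation_table change further down). Drivers place DMA descriptors and buffers there with a section attribute, as the Ethernet driver hunk at the end of this diff does; a minimal sketch with an illustrative variable name:

    /* Zero-initialised DMA buffer placed in the non-cacheable RAM_NC region;
     * 16-byte alignment matches the EDMAC descriptor/buffer requirement. */
    static uint8_t dma_buffer[1600] __attribute__((section("NC_BSS"), aligned(16)));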


@@ -1,125 +0,0 @@
@/*******************************************************************************
@* DISCLAIMER
@* This software is supplied by Renesas Electronics Corporation and is only
@* intended for use with Renesas products. No other uses are authorized. This
@* software is owned by Renesas Electronics Corporation and is protected under
@* all applicable laws, including copyright laws.
@* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING
@* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT
@* LIMITED TO WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
@* AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED.
@* TO THE MAXIMUM EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS
@* ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES SHALL BE LIABLE
@* FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR
@* ANY REASON RELATED TO THIS SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE
@* BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
@* Renesas reserves the right, without notice, to make changes to this software
@* and to discontinue the availability of this software. By using this software,
@* you agree to the additional terms and conditions found by accessing the
@* following link:
@* http://www.renesas.com/disclaimer
@* Copyright (C) 2012 - 2014 Renesas Electronics Corporation. All rights reserved.
@*******************************************************************************/
@/*******************************************************************************
@* File Name : irqfiq_handler.s
@* $Rev: 823 $
@* $Date:: 2014-04-21 16:45:10 +0900#$
@* Description : IRQ, FIQ handler
@*******************************************************************************/
@ Standard definitions of mode bits and interrupt (I & F) flags in PSRs
.EQU SYS_MODE, 0x1f
@ INTC ICCIAR register address
.EQU INTC_ICCIAR_ADDR, 0xE820200C
@ INTC ICCEOIR register address
.EQU INTC_ICCEOIR_ADDR, 0xE8202010
@ INTC ICDABR0 register address
.EQU INTC_ICDABR0_ADDR, 0xE8201300
@ INTC ICDIPR0 register address
.EQU INTC_ICDIPR0_ADDR, 0xE8201400
@ INTC ICCHPIR register address
.EQU INTC_ICCHPIR_ADDR, 0xE8202018
@==================================================================
@ Entry point for the FIQ handler
@==================================================================
@ PRESERVE8
@ .section IRQ_FIQ_HANDLER, #execinstr
.text
.arm
@ IMPORT FiqHandler_Interrupt
@ IMPORT INTC_Handler_Interrupt
.global irq_handler
.global fiq_handler
@******************************************************************************
@ Function Name : fiq_handler
@ Description : This function is the assembler function executed when the FIQ
@ : interrupt is generated.
@******************************************************************************
fiq_handler:
B FiqHandler_Interrupt
@******************************************************************************
@ Function Name : irq_handler
@ Description : This function is the assembler function executed when the IRQ
@ : interrupt is generated. After saving the stack pointer and
@ : the stack for general registers and obtaining the INTC interrupt
@ : source ID, calls the IntcIrqHandler_interrupt function written
@ : in C language to execute the processing for the INTC interrupt
@ : handler corresponding to the interrupt source ID.
@ : After the INTC interrupt handler processing, restores
@ : the stack pointer and the general registers from the stack and
@ : returns from the IRQ interrupt processing.
@******************************************************************************
irq_handler:
SUB lr, lr, #4
SRSDB sp!, #SYS_MODE @;; Store LR_irq and SPSR_irq in system mode stack
CPS #SYS_MODE @;; Switch to system mode
PUSH {r0-r3, r12} @;; Store other AAPCS registers
LDR r1, =INTC_ICCHPIR_ADDR
LDR r3, [r1]
LDR r2, =INTC_ICCIAR_ADDR
LDR r0, [r2] @;; Read ICCIAR
LDR r2, =0x000003FF
AND r3, r0, r2
CMP r3, r2
BEQ end_of_handler
CMP r3, #0
BNE int_active
LDR r2, =INTC_ICDABR0_ADDR
LDR r3, [r2]
AND r3, r3, #0x00000001
CMP r3, #0
BNE int_active
LDR r2, =INTC_ICDIPR0_ADDR
LDR r3, [r2]
STR r3, [r2]
B end_of_handler
int_active:
PUSH {r0}
MOV r1, sp @;;
AND r1, r1, #4 @;; Make alignment for stack
SUB sp, sp, r1 @;;
PUSH {r1, lr}
BL INTC_Handler_Interrupt @;; First argument(r0) = ICCIAR read value
POP {r1, lr}
ADD sp, sp, r1
POP {r0}
LDR r2, =INTC_ICCEOIR_ADDR
STR r0, [r2] @;; Write ICCEOIR
end_of_handler:
POP {r0-r3, r12} @;; Restore registers
RFEIA sp! @;; Return from system mode stack using RFE
Literals3:
.LTORG
.END


@@ -16,67 +16,90 @@
* CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
*/
.syntax unified
.extern _start
@ Standard definitions of mode bits and interrupt (I & F) flags in PSRs
-.equ USR_MODE, 0x10
+.equ USR_MODE , 0x10
-.equ FIQ_MODE, 0x11
+.equ FIQ_MODE , 0x11
-.equ IRQ_MODE, 0x12
+.equ IRQ_MODE , 0x12
-.equ SVC_MODE, 0x13
+.equ SVC_MODE , 0x13
-.equ ABT_MODE, 0x17
+.equ ABT_MODE , 0x17
-.equ UND_MODE, 0x1b
+.equ UND_MODE , 0x1b
-.equ SYS_MODE, 0x1f
+.equ SYS_MODE , 0x1f
-.equ Thum_bit, 0x20 @ CPSR/SPSR Thumb bit
+.equ Thum_bit , 0x20 @ CPSR/SPSR Thumb bit
-/* Memory Model
+.equ GICI_BASE , 0xe8202000
-The HEAP starts at the end of the DATA section and grows upward.
+.equ ICCIAR_OFFSET , 0x0000000C
.equ ICCEOIR_OFFSET , 0x00000010
.equ ICCHPIR_OFFSET , 0x00000018
.equ GICD_BASE , 0xe8201000
.equ ICDISER0_OFFSET , 0x00000100
.equ ICDICER0_OFFSET , 0x00000180
.equ ICDISPR0_OFFSET , 0x00000200
.equ ICDABR0_OFFSET , 0x00000300
.equ ICDIPR0_OFFSET , 0x00000400
-The STACK starts at the end of the RAM and grows downward.
+.equ Mode_USR , 0x10
.equ Mode_FIQ , 0x11
.equ Mode_IRQ , 0x12
.equ Mode_SVC , 0x13
.equ Mode_ABT , 0x17
.equ Mode_UND , 0x1B
.equ Mode_SYS , 0x1F
-The HEAP and stack STACK are only checked at compile time:
+.equ I_Bit , 0x80 @ when I bit is set, IRQ is disabled
-(DATA_SIZE + HEAP_SIZE + STACK_SIZE) < RAM_SIZE
+.equ F_Bit , 0x40 @ when F bit is set, FIQ is disabled
.equ T_Bit , 0x20 @ when T bit is set, core is in Thumb state
-This is just a check for the bare minimum for the Heap+Stack area before
+.equ GIC_ERRATA_CHECK_1, 0x000003FE
-aborting compilation, it is not the run time limit:
+.equ GIC_ERRATA_CHECK_2, 0x000003FF
Heap_Size + Stack_Size = 0x80 + 0x80 = 0x100
-*/
+.equ Sect_Normal , 0x00005c06 @ outer & inner wb/wa, non-shareable, executable, rw, domain 0, base addr 0
.equ Sect_Normal_Cod , 0x0000dc06 @ outer & inner wb/wa, non-shareable, executable, ro, domain 0, base addr 0
.equ Sect_Normal_RO , 0x0000dc16 @ as Sect_Normal_Cod, but not executable
.equ Sect_Normal_RW , 0x00005c16 @ as Sect_Normal_Cod, but writeable and not executable
.equ Sect_SO , 0x00000c12 @ strongly-ordered (therefore shareable), not executable, rw, domain 0, base addr 0
.equ Sect_Device_RO , 0x00008c12 @ device, non-shareable, non-executable, ro, domain 0, base addr 0
.equ Sect_Device_RW , 0x00000c12 @ as Sect_Device_RO, but writeable
.equ Sect_Fault , 0x00000000 @ this translation will fault (the bottom 2 bits are important, the rest are ignored)
.equ RAM_BASE , 0x80000000
.equ VRAM_BASE , 0x18000000
.equ SRAM_BASE , 0x2e000000
.equ ETHERNET , 0x1a000000
.equ CS3_PERIPHERAL_BASE, 0x1c000000
@ Stack Configuration
.EQU UND_Stack_Size , 0x00000100
.EQU SVC_Stack_Size , 0x00008000
.EQU ABT_Stack_Size , 0x00000100
.EQU FIQ_Stack_Size , 0x00000100
.EQU IRQ_Stack_Size , 0x00008000
.EQU USR_Stack_Size , 0x00004000
.EQU ISR_Stack_Size, (UND_Stack_Size + SVC_Stack_Size + ABT_Stack_Size + FIQ_Stack_Size + IRQ_Stack_Size)
.section .stack
.align 3
#ifdef __STACK_SIZE
.equ Stack_Size, __STACK_SIZE
#else
.equ Stack_Size, 0xc00
#endif
.globl __StackTop
.globl __StackLimit
__StackLimit:
-.space Stack_Size
+.space ISR_Stack_Size
__initial_sp:
.space USR_Stack_Size
.size __StackLimit, . - __StackLimit
__StackTop:
.size __StackTop, . - __StackTop
__AStackLimit:
.space Stack_Size
-.size __AStackLimit, . - __AStackLimit
+@ Heap Configuration
__AStackTop:
-.size __AStackTop, . - __AStackTop
+.EQU Heap_Size , 0x00080000
__BStackLimit:
.space Stack_Size
.size __BStackLimit, . - __StackLimit
__BStackTop:
.size __BStackTop, . - __BStackTop
__CStackLimit:
.space Stack_Size
.size __CStackLimit, . - __CStackLimit
__CStackTop:
.size __CStackTop, . - __CStackTop
.section .heap
.align 3
#ifdef __HEAP_SIZE
.equ Heap_Size, __HEAP_SIZE
#else
.equ Heap_Size, 0x800
#endif
.globl __HeapBase
.globl __HeapLimit
__HeapBase:
@@ -85,163 +108,425 @@ __HeapBase:
__HeapLimit:
.size __HeapLimit, . - __HeapLimit
.section .isr_vector
.align 2
.globl __isr_vector
__isr_vector:
.long 0xe59ff018 // 0x00
.long 0xe59ff018 // 0x04
.long 0xe59ff018 // 0x08
.long 0xe59ff018 // 0x0c
.long 0xe59ff018 // 0x10
.long 0xe59ff018 // 0x14
.long 0xe59ff018 // 0x18
.long 0xe59ff018 // 0x1c
.long Reset_Handler /* 0x20 */
-.long undefinedInstruction /* 0x24 */
+.long Undef_Handler /* 0x24 */
-.long softwareInterrupt /* 0x28 */
+.long SVC_Handler /* 0x28 */
-.long prefetchAboart /* 0x2c */
+.long PAbt_Handler /* 0x2c */
-.long dataAbort /* 0x30 */
+.long DAbt_Handler /* 0x30 */
.long 0 /* Reserved */
-.long irq_handler /* IRQ */
+.long IRQ_Handler /* IRQ */
-.long fiq_handler /* FIQ */
+.long FIQ_Handler /* FIQ */
.size __isr_vector, . - __isr_vector
.text
// .thumb
// .thumb_func
.align 2
.globl Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
-/* Loop to copy data from read only memory to RAM. The ranges
+@ Put any cores other than 0 to sleep
-* of copy from/to are specified by following symbols evaluated in
+mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
-* linker script.
+ands r0, r0, #3
* _etext: End of code section, i.e., begin of data sections to copy from.
* __data_start__/__data_end__: RAM address range that data should be
* copied to. Both must be aligned to 4 bytes boundary. */
-mrc p15, 0, r0, c1, c0, 0 @;; Read CP15 System Control register (SCTLR)
+goToSleep:
-bic r0, r0, #(0x1 << 12) @;; Clear I bit 12 to disable I Cache
+wfine
-bic r0, r0, #(0x1 << 2) @;; Clear C bit 2 to disable D Cache
+bne goToSleep
bic r0, r0, #0x1 @;; Clear M bit 0 to disable MMU
mcr p15, 0, r0, c1, c0, 0 @;; Write value back to CP15 System Control register
-@;; SVC Mode(Default)
+@ Enable access to NEON/VFP by enabling access to Coprocessors 10 and 11.
-LDR sp, =__AStackTop
+@ Enables Full Access i.e. in both privileged and non privileged modes
mrc p15, 0, r0, c1, c0, 2 @ Read Coprocessor Access Control Register (CPACR)
orr r0, r0, #(0xF << 20) @ Enable access to CP 10 & 11
mcr p15, 0, r0, c1, c0, 2 @ Write Coprocessor Access Control Register (CPACR)
isb
-CPS #IRQ_MODE @;; IRQ Mode
+@ Switch on the VFP and NEON hardware
-LDR sp, =__BStackTop
+mov r0, #0x40000000
vmsr fpexc, r0 @ Write FPEXC register, EN bit set
-CPS #FIQ_MODE @;; FIQ Mode
+mrc p15, 0, r0, c1, c0, 0 @ Read CP15 System Control register
-LDR sp, =__CStackTop
+bic r0, r0, #(0x1 << 12) @ Clear I bit 12 to disable I Cache
bic r0, r0, #(0x1 << 2) @ Clear C bit 2 to disable D Cache
bic r0, r0, #0x1 @ Clear M bit 0 to disable MMU
bic r0, r0, #(0x1 << 11) @ Clear Z bit 11 to disable branch prediction
bic r0, r0, #(0x1 << 13) @ Clear V bit 13 to disable hivecs
mcr p15, 0, r0, c1, c0, 0 @ Write value back to CP15 System Control register
isb
-@CPS #ABT_MODE @;; ABT Mode
+@ Set Vector Base Address Register (VBAR) to point to this application's vector table
-@LDR sp, =__StackTop
+ldr r0, =__isr_vector
mcr p15, 0, r0, c12, c0, 0
-CPS #SYS_MODE @;; SYS Mode
+@ Setup Stack for each exceptional mode
/* ldr r0, =__StackTop */
ldr r0, =__initial_sp
-@; System mode Stack pointer is set up ARM_LIB_STACK in the __main()->__entry()
+@ Enter Undefined Instruction Mode and set its Stack Pointer
-LDR sp, =__StackTop
+msr cpsr_c, #(Mode_UND | I_Bit | F_Bit)
mov sp, r0
sub r0, r0, #UND_Stack_Size
-ldr r1, =__etext
+@ Enter Abort Mode and set its Stack Pointer
-ldr r2, =__data_start__
+msr cpsr_c, #(Mode_ABT | I_Bit | F_Bit)
-ldr r3, =__data_end__
+mov sp, r0
sub r0, r0, #ABT_Stack_Size
-.Lflash_to_ram_loop:
+@ Enter FIQ Mode and set its Stack Pointer
-cmp r2, r3
+msr cpsr_c, #(Mode_FIQ | I_Bit | F_Bit)
-ittt lt
+mov sp, r0
-ldrlt r0, [r1], #4
+sub r0, r0, #FIQ_Stack_Size
strlt r0, [r2], #4
blt .Lflash_to_ram_loop
-ldr r0, =set_low_vector
+@ Enter IRQ Mode and set its Stack Pointer
-blx r0
+msr cpsr_c, #(Mode_IRQ | I_Bit | F_Bit)
-ldr r0, =enable_VFP
+mov sp, r0
-blx r0
+sub r0, r0, #IRQ_Stack_Size
-ldr r0, =SystemInit
+@ Enter Supervisor Mode and set its Stack Pointer
-blx r0
+msr cpsr_c, #(Mode_SVC | I_Bit | F_Bit)
-ldr r0, =_start
+mov sp, r0
bx r0
-set_low_vector:
+@ Enter System Mode to complete initialization and enter kernel
-mrc p15, 0, r0, c1, c0, 0
+msr cpsr_c, #(Mode_SYS | I_Bit | F_Bit)
-mov r1, #0xffffdfff
+mov sp, r0
and r0, r1
mcr p15, 0, r0, c1, c0, 0
-mrc p15, 0, r0, c12, c0, 0 // vector set
+isb
-mov r0, #0x20000000
+ldr r0, =RZ_A1_SetSramWriteEnable
-mcr p15, 0, r0, c12, c0, 0 // vector set
+blx r0
bx lr
.extern create_translation_table
bl create_translation_table
@ USR/SYS stack pointer will be set during kernel init
ldr r0, =SystemInit
blx r0
ldr r0, =InitMemorySubsystem
blx r0
@ fp_init
mov r0, #0x3000000
vmsr fpscr, r0
@ data sections copy
ldr r4, =__copy_table_start__
ldr r5, =__copy_table_end__
.L_loop0:
cmp r4, r5
bge .L_loop0_done
ldr r1, [r4]
ldr r2, [r4, #4]
ldr r3, [r4, #8]
.L_loop0_0:
subs r3, #4
ittt ge
ldrge r0, [r1, r3]
strge r0, [r2, r3]
bge .L_loop0_0
adds r4, #12
b .L_loop0
.L_loop0_done:
@ bss sections clear
ldr r3, =__zero_table_start__
ldr r4, =__zero_table_end__
.L_loop2:
cmp r3, r4
bge .L_loop2_done
ldr r1, [r3]
ldr r2, [r3, #4]
movs r0, 0
.L_loop2_0:
subs r2, #4
itt ge
strge r0, [r1, r2]
bge .L_loop2_0
adds r3, #8
b .L_loop2
.L_loop2_done:
ldr r0, =_start
bx r0
ldr r0, sf_boot @ dummy to keep boot loader area
loop_here:
b loop_here
sf_boot:
.word boot_loader
.equ VFPEnable, 0x40000000
enable_VFP:
;;
mrc p15, 0, r0, c1, c0, 2 ;
orr r0, r0, #(3 << 20) ;
orr r0, r0, #(3 << 22) ;
bic r0, r0, #(3 << 30) ;
mcr p15, 0, r0, c1, c0, 2 ;
isb ;
;;
mov r0, #VFPEnable
vmsr fpexc, r0
bx lr
;;
.pool
.size Reset_Handler, . - Reset_Handler
.text
Undef_Handler:
.global Undef_Handler
.func Undef_Handler
.extern CUndefHandler
SRSDB SP!, #Mode_UND
PUSH {R0-R4, R12} /* Save APCS corruptible registers to UND mode stack */
MRS R0, SPSR
TST R0, #T_Bit /* Check mode */
MOVEQ R1, #4 /* R1 = 4 ARM mode */
MOVNE R1, #2 /* R1 = 2 Thumb mode */
SUB R0, LR, R1
LDREQ R0, [R0] /* ARM mode - R0 points to offending instruction */
BEQ undef_cont
/* Thumb instruction */
/* Determine if it is a 32-bit Thumb instruction */
LDRH R0, [R0]
MOV R2, #0x1c
CMP R2, R0, LSR #11
BHS undef_cont /* 16-bit Thumb instruction */
/* 32-bit Thumb instruction. Unaligned - we need to reconstruct the offending instruction. */
LDRH R2, [LR]
ORR R0, R2, R0, LSL #16
undef_cont:
MOV R2, LR /* Set LR to third argument */
/* AND R12, SP, #4 */ /* Ensure stack is 8-byte aligned */
MOV R3, SP /* Ensure stack is 8-byte aligned */
AND R12, R3, #4
SUB SP, SP, R12 /* Adjust stack */
PUSH {R12, LR} /* Store stack adjustment and dummy LR */
/* R0 Offending instruction */
/* R1 =2 (Thumb) or =4 (ARM) */
BL CUndefHandler
POP {R12, LR} /* Get stack adjustment & discard dummy LR */
ADD SP, SP, R12 /* Unadjust stack */
LDR LR, [SP, #24] /* Restore stacked LR and possibly adjust for retry */
SUB LR, LR, R0
LDR R0, [SP, #28] /* Restore stacked SPSR */
MSR SPSR_cxsf, R0
POP {R0-R4, R12} /* Restore stacked APCS registers */
ADD SP, SP, #8 /* Adjust SP for already-restored banked registers */
MOVS PC, LR
.endfunc
PAbt_Handler:
.global PAbt_Handler
.func PAbt_Handler
.extern CPAbtHandler
SUB LR, LR, #4 /* Pre-adjust LR */
SRSDB SP!, #Mode_ABT /* Save LR and SPRS to ABT mode stack */
PUSH {R0-R4, R12} /* Save APCS corruptible registers to ABT mode stack */
MRC p15, 0, R0, c5, c0, 1 /* IFSR */
MRC p15, 0, R1, c6, c0, 2 /* IFAR */
MOV R2, LR /* Set LR to third argument */
/* AND R12, SP, #4 */ /* Ensure stack is 8-byte aligned */
MOV R3, SP /* Ensure stack is 8-byte aligned */
AND R12, R3, #4
SUB SP, SP, R12 /* Adjust stack */
PUSH {R12, LR} /* Store stack adjustment and dummy LR */
BL CPAbtHandler
POP {R12, LR} /* Get stack adjustment & discard dummy LR */
ADD SP, SP, R12 /* Unadjust stack */
POP {R0-R4, R12} /* Restore stack APCS registers */
RFEFD SP! /* Return from exception */
.endfunc
DAbt_Handler:
.global DAbt_Handler
.func DAbt_Handler
.extern CDAbtHandler
SUB LR, LR, #8 /* Pre-adjust LR */
SRSDB SP!, #Mode_ABT /* Save LR and SPRS to ABT mode stack */
PUSH {R0-R4, R12} /* Save APCS corruptible registers to ABT mode stack */
CLREX /* State of exclusive monitors unknown after taken data abort */
MRC p15, 0, R0, c5, c0, 0 /* DFSR */
MRC p15, 0, R1, c6, c0, 0 /* DFAR */
MOV R2, LR /* Set LR to third argument */
/* AND R12, SP, #4 */ /* Ensure stack is 8-byte aligned */
MOV R3, SP /* Ensure stack is 8-byte aligned */
AND R12, R3, #4
SUB SP, SP, R12 /* Adjust stack */
PUSH {R12, LR} /* Store stack adjustment and dummy LR */
BL CDAbtHandler
POP {R12, LR} /* Get stack adjustment & discard dummy LR */
ADD SP, SP, R12 /* Unadjust stack */
POP {R0-R4, R12} /* Restore stacked APCS registers */
RFEFD SP! /* Return from exception */
.endfunc
FIQ_Handler:
.global FIQ_Handler
.func FIQ_Handler
/* An FIQ might occur between the dummy read and the real read of the GIC in IRQ_Handler,
* so if a real FIQ Handler is implemented, this will be needed before returning:
*/
/* LDR R1, =GICI_BASE
LDR R0, [R1, #ICCHPIR_OFFSET] ; Dummy Read ICCHPIR (GIC CPU Interface register) to avoid GIC 390 errata 801120
*/
B .
.endfunc
.extern SVC_Handler /* refer RTX function */
IRQ_Handler:
.global IRQ_Handler
.func IRQ_Handler
.extern IRQCount
.extern IRQTable
.extern IRQNestLevel
/* prologue */
SUB LR, LR, #4 /* Pre-adjust LR */
SRSDB SP!, #Mode_SVC /* Save LR_IRQ and SPRS_IRQ to SVC mode stack */
CPS #Mode_SVC /* Switch to SVC mode, to avoid a nested interrupt corrupting LR on a BL */
PUSH {R0-R3, R12} /* Save remaining APCS corruptible registers to SVC stack */
/* AND R1, SP, #4 */ /* Ensure stack is 8-byte aligned */
MOV R3, SP /* Ensure stack is 8-byte aligned */
AND R1, R3, #4
SUB SP, SP, R1 /* Adjust stack */
PUSH {R1, LR} /* Store stack adjustment and LR_SVC to SVC stack */
LDR R0, =IRQNestLevel /* Get address of nesting counter */
LDR R1, [R0]
ADD R1, R1, #1 /* Increment nesting counter */
STR R1, [R0]
/* identify and acknowledge interrupt */
LDR R1, =GICI_BASE
LDR R0, [R1, #ICCHPIR_OFFSET] /* Dummy Read ICCHPIR (GIC CPU Interface register) to avoid GIC 390 errata 801120 */
LDR R0, [R1, #ICCIAR_OFFSET] /* Read ICCIAR (GIC CPU Interface register) */
DSB /* Ensure that interrupt acknowledge completes before re-enabling interrupts */
/* Workaround GIC 390 errata 733075
* If the ID is not 0, then service the interrupt as normal.
* If the ID is 0 and active, then service interrupt ID 0 as normal.
* If the ID is 0 but not active, then the GIC CPU interface may be locked-up, so unlock it
* with a dummy write to ICDIPR0. This interrupt should be treated as spurious and not serviced.
*/
LDR R2, =GICD_BASE
LDR R3, =GIC_ERRATA_CHECK_1
CMP R0, R3
BEQ unlock_cpu
LDR R3, =GIC_ERRATA_CHECK_2
CMP R0, R3
BEQ unlock_cpu
CMP R0, #0
BNE int_active /* If the ID is not 0, then service the interrupt */
LDR R3, [R2, #ICDABR0_OFFSET] /* Get the interrupt state */
TST R3, #1
BNE int_active /* If active, then service the interrupt */
unlock_cpu:
LDR R3, [R2, #ICDIPR0_OFFSET] /* Not active, so unlock the CPU interface */
STR R3, [R2, #ICDIPR0_OFFSET] /* with a dummy write */
DSB /* Ensure the write completes before continuing */
B ret_irq /* Do not service the spurious interrupt */
/* End workaround */
int_active:
LDR R2, =IRQCount /* Read number of IRQs */
LDR R2, [R2]
CMP R0, R2 /* Clean up and return if no handler */
BHS ret_irq /* In a single-processor system, spurious interrupt ID 1023 does not need any special handling */
LDR R2, =IRQTable /* Get address of handler */
LDR R2, [R2, R0, LSL #2]
CMP R2, #0 /* Clean up and return if handler address is 0 */
BEQ ret_irq
PUSH {R0,R1}
CPSIE i /* Now safe to re-enable interrupts */
BLX R2 /* Call handler. R0 will be IRQ number */
CPSID i /* Disable interrupts again */
/* write EOIR (GIC CPU Interface register) */
POP {R0,R1}
DSB /* Ensure that interrupt source is cleared before we write the EOIR */
ret_irq:
/* epilogue */
STR R0, [R1, #ICCEOIR_OFFSET]
LDR R0, =IRQNestLevel /* Get address of nesting counter */
LDR R1, [R0]
SUB R1, R1, #1 /* Decrement nesting counter */
STR R1, [R0]
POP {R1, LR} /* Get stack adjustment and restore LR_SVC */
ADD SP, SP, R1 /* Unadjust stack */
POP {R0-R3,R12} /* Restore stacked APCS registers */
RFEFD SP! /* Return from exception */
.endfunc
/* Macro to define default handlers. Default handler
* will be weak symbol and just dead loops. They can be
* overwritten by other handlers */
.macro def_default_handler handler_name
.align 1
.thumb_func
.weak \handler_name
.type \handler_name, %function
\handler_name :
b .
.size \handler_name, . - \handler_name
.endm
-def_default_handler undefinedInstruction /* 0x24 */
+def_default_handler SVC_Handler
def_default_handler softwareInterrupt /* 0x28 */
def_default_handler prefetchAboart /* 0x2c */
def_default_handler dataAbort /* 0x30 */
def_default_handler Default_Handler /* --- */
.global __disable_irq
.global __enable_irq
.global __disable_fiq
.global __enable_fiq
__disable_irq:
mrs r0,apsr @ formerly cpsr
and r0,r0,#0x80
cpsid i
bx lr
__enable_irq:
cpsie i
bx lr
-__disable_fiq:
+/* User Initial Stack & Heap */
cpsid f
bx lr
-__enable_fiq:
+.ifdef __MICROLIB
cpsie f
-bx lr
+.global __initial_sp
.global __heap_base
.global __heap_limit
.else
.extern __use_two_region_memory
.global __user_initial_stackheap
__user_initial_stackheap:
LDR R0, = __HeapBase
LDR R1, =(__StackLimit + USR_Stack_Size)
LDR R2, = (__HeapBase + Heap_Size)
LDR R3, = __StackLimit
BX LR
.endif
.END
.end


@@ -26,8 +26,14 @@
* $Date:: $
* @brief RZ_A1 serial flash boot loader
******************************************************************************/
#if defined (__CC_ARM)
#pragma arm section rodata = "BOOT_LOADER"
const char boot_loader[] __attribute__((used)) =
#else
const char boot_loader[] __attribute__ ((section(".boot_loader"), used)) =
#endif
{
0x18,0xF0,0x9F,0xE5,0x18,0xF0,0x9F,0xE5,0x18,0xF0,0x9F,0xE5,0x18,0xF0,0x9F,0xE5,
0x18,0xF0,0x9F,0xE5,0x18,0xF0,0x9F,0xE5,0x18,0xF0,0x9F,0xE5,0x18,0xF0,0x9F,0xE5,
@@ -814,6 +820,7 @@ const char boot_loader[] __attribute__((used)) =
0x00,0x02,0x00,0x18,0x00,0x00,0x02,0x20,0x04,0x00,0x9F,0xE5,0x10,0x0F,0x0C,0xEE,
0x1E,0xFF,0x2F,0xE1,0x00,0x00,0x00,0x18,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
};
#if defined (__CC_ARM)
#pragma arm section
#endif


@@ -72,16 +72,31 @@ extern uint32_t Image$$RO_DATA$$Base;
extern uint32_t Image$$RW_DATA$$Base;
extern uint32_t Image$$ZI_DATA$$Base;
extern uint32_t Image$$TTB$$ZI$$Base;
#if defined( __CC_ARM )
#else
extern uint32_t Image$$RW_DATA_NC$$Base;
extern uint32_t Image$$ZI_DATA_NC$$Base;
#endif
extern uint32_t Image$$VECTORS$$Limit;
extern uint32_t Image$$RO_DATA$$Limit;
extern uint32_t Image$$RW_DATA$$Limit;
extern uint32_t Image$$ZI_DATA$$Limit;
#if defined( __CC_ARM )
#else
extern uint32_t Image$$RW_DATA_NC$$Limit;
extern uint32_t Image$$ZI_DATA_NC$$Limit;
#endif
#define VECTORS_SIZE (((uint32_t)&Image$$VECTORS$$Limit >> 20) - ((uint32_t)&Image$$VECTORS$$Base >> 20) + 1)
#define RO_DATA_SIZE (((uint32_t)&Image$$RO_DATA$$Limit >> 20) - ((uint32_t)&Image$$RO_DATA$$Base >> 20) + 1)
#define RW_DATA_SIZE (((uint32_t)&Image$$RW_DATA$$Limit >> 20) - ((uint32_t)&Image$$RW_DATA$$Base >> 20) + 1)
#define ZI_DATA_SIZE (((uint32_t)&Image$$ZI_DATA$$Limit >> 20) - ((uint32_t)&Image$$ZI_DATA$$Base >> 20) + 1)
#if defined( __CC_ARM )
#else
#define RW_DATA_NC_SIZE (((uint32_t)&Image$$RW_DATA_NC$$Limit >> 20) - ((uint32_t)&Image$$RW_DATA_NC$$Base >> 20) + 1)
#define ZI_DATA_NC_SIZE (((uint32_t)&Image$$ZI_DATA_NC$$Limit >> 20) - ((uint32_t)&Image$$ZI_DATA_NC$$Base >> 20) + 1)
#endif
static uint32_t Sect_Normal; //outer & inner wb/wa, non-shareable, executable, rw, domain 0, base addr 0
static uint32_t Sect_Normal_NC; //non-shareable, non-executable, rw, domain 0, base addr 0
@@ -147,7 +162,12 @@ void create_translation_table(void)
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$VECTORS$$Base, VECTORS_SIZE, Sect_Normal_Cod);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA$$Base, RW_DATA_SIZE, Sect_Normal_RW);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$ZI_DATA$$Base, ZI_DATA_SIZE, Sect_Normal_RW);
#if defined( __CC_ARM )
__TTSection (&Image$$TTB$$ZI$$Base, Renesas_RZ_A1_ONCHIP_SRAM_NC_BASE, 10, Sect_Normal_NC);
#else
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$RW_DATA_NC$$Base, RW_DATA_NC_SIZE, Sect_Normal_NC);
__TTSection (&Image$$TTB$$ZI$$Base, (uint32_t)&Image$$ZI_DATA_NC$$Base, ZI_DATA_NC_SIZE, Sect_Normal_NC);
#endif
/* Set location of level 1 page table
; 31:14 - Translation table base addr (31:14-TTBCR.N, TTBCR.N is 0 out of reset)


@@ -41,12 +41,18 @@
#include "RZ_A1_Init.h"
#if defined(__ARMCC_VERSION)
extern void $Super$$main(void);
__asm void FPUEnable(void);
#else
void FPUEnable(void);
#endif
uint32_t IRQNestLevel;
#if defined(__ARMCC_VERSION)
/**
* Initialize the cache.
*
@@ -92,6 +98,45 @@ void InitMemorySubsystem(void) {
}
#pragma pop
#elif defined(__GNUC__)
void InitMemorySubsystem(void) {
/* This SVC is specific for reset where data / tlb / btac may contain undefined data, therefore before
* enabling the cache you must invalidate the instruction cache, the data cache, TLB, and BTAC.
* You are not required to invalidate the main TLB, even though it is recommended for safety
* reasons. This ensures compatibility with future revisions of the processor. */
unsigned int l2_id;
/* Invalidate undefined data */
__ca9u_inv_tlb_all();
__v7_inv_icache_all();
__v7_inv_dcache_all();
__v7_inv_btac();
/* Don't use this function during runtime since caches may contain valid data. For a correct cache maintenance you may need to execute a clean and
* invalidate in order to flush the valid data to the next level cache.
*/
__enable_mmu();
/* After MMU is enabled and data has been invalidated, enable caches and BTAC */
__enable_caches();
__enable_btac();
/* If present, you may also need to Invalidate and Enable L2 cache here */
l2_id = PL310_GetID();
if (l2_id)
{
PL310_InvAllByWay();
PL310_Enable();
}
}
#else
#endif
IRQHandler IRQTable[Renesas_RZ_A1_IRQ_MAX+1];
uint32_t IRQCount = sizeof IRQTable / 4;
@@ -237,8 +282,8 @@ void CPAbtHandler(uint32_t IFSR, uint32_t IFAR, uint32_t LR) {
//this will be 2 when we have performed some maintenance and want to retry the instruction in thumb (state == 2)
//this will be 4 when we have performed some maintenance and want to retry the instruction in arm (state == 4)
uint32_t CUndefHandler(uint32_t opcode, uint32_t state, uint32_t LR) {
-const int THUMB = 2;
+const unsigned int THUMB = 2;
-const int ARM = 4;
+const unsigned int ARM = 4;
//Lazy VFP/NEON initialisation and switching
if ((state == ARM && ((opcode & 0x0C000000)) >> 26 == 0x03) ||
(state == THUMB && ((opcode & 0xEC000000)) >> 26 == 0x3B)) {
@@ -252,6 +297,7 @@ uint32_t CUndefHandler(uint32_t opcode, uint32_t state, uint32_t LR) {
while(1);
}
#if defined(__ARMCC_VERSION)
#pragma push
#pragma arm
//Critical section, called from undef handler, so systick is disabled
@@ -296,3 +342,43 @@ __asm void FPUEnable(void) {
BX LR
}
#pragma pop
#elif defined(__GNUC__)
void FPUEnable(void)
{
__asm__ __volatile__ (
".align 2 \n\t"
".arm \n\t"
"mrc p15,0,r1,c1,c0,2 \n\t"
"orr r1,r1,#0x00f00000 \n\t"
"mcr p15,0,r1,c1,c0,2 \n\t"
"vmrs r1,fpexc \n\t"
"orr r1,r1,#0x40000000 \n\t"
"vmsr fpexc,r1 \n\t"
"mov r2,#0 \n\t"
"vmov d0, r2,r2 \n\t"
"vmov d1, r2,r2 \n\t"
"vmov d2, r2,r2 \n\t"
"vmov d3, r2,r2 \n\t"
"vmov d4, r2,r2 \n\t"
"vmov d5, r2,r2 \n\t"
"vmov d6, r2,r2 \n\t"
"vmov d7, r2,r2 \n\t"
"vmov d8, r2,r2 \n\t"
"vmov d9, r2,r2 \n\t"
"vmov d10,r2,r2 \n\t"
"vmov d11,r2,r2 \n\t"
"vmov d12,r2,r2 \n\t"
"vmov d13,r2,r2 \n\t"
"vmov d14,r2,r2 \n\t"
"vmov d15,r2,r2 \n\t"
"vmrs r2,fpscr \n\t"
"ldr r3,=0x00086060 \n\t"
"and r2,r2,r3 \n\t"
"vmsr fpscr,r2 \n\t"
"bx lr \n\t"
);
}
#else
#endif


@@ -0,0 +1,94 @@
/* Copyright (c) 2009 - 2012 ARM LIMITED
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of ARM nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
* Functions
*---------------------------------------------------------------------------*/
.text
.global __v7_all_cache
/*
* __STATIC_ASM void __v7_all_cache(uint32_t op) {
*/
__v7_all_cache:
.arm
PUSH {R4-R11}
MRC p15, 1, R6, c0, c0, 1 // Read CLIDR
ANDS R3, R6, #0x07000000 // Extract coherency level
MOV R3, R3, LSR #23 // Total cache levels << 1
BEQ Finished // If 0, no need to clean
MOV R10, #0 // R10 holds current cache level << 1
Loop1: ADD R2, R10, R10, LSR #1 // R2 holds cache "Set" position
MOV R1, R6, LSR R2 // Bottom 3 bits are the Cache-type for this level
AND R1, R1, #7 // Isolate those lower 3 bits
CMP R1, #2
BLT Skip // No cache or only instruction cache at this level
MCR p15, 2, R10, c0, c0, 0 // Write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, R1, c0, c0, 0 // Reads current Cache Size ID register
AND R2, R1, #7 // Extract the line length field
ADD R2, R2, #4 // Add 4 for the line length offset (log2 16 bytes)
LDR R4, =0x3FF
ANDS R4, R4, R1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ R5, R4 // R5 is the bit position of the way size increment
LDR R7, =0x7FFF
ANDS R7, R7, R1, LSR #13 // R7 is the max number of the index size (right aligned)
Loop2: MOV R9, R4 // R9 working copy of the max way size (right aligned)
Loop3: ORR R11, R10, R9, LSL R5 // Factor in the Way number and cache number into R11
ORR R11, R11, R7, LSL R2 // Factor in the Set number
CMP R0, #0
BNE Dccsw
MCR p15, 0, R11, c7, c6, 2 // DCISW. Invalidate by Set/Way
B cont
Dccsw: CMP R0, #1
BNE Dccisw
MCR p15, 0, R11, c7, c10, 2 // DCCSW. Clean by Set/Way
B cont
Dccisw: MCR p15, 0, R11, c7, c14, 2 // DCCISW, Clean and Invalidate by Set/Way
cont: SUBS R9, R9, #1 // Decrement the Way number
BGE Loop3
SUBS R7, R7, #1 // Decrement the Set number
BGE Loop2
Skip: ADD R10, R10, #2 // increment the cache number
CMP R3, R10
BGT Loop1
Finished:
DSB
POP {R4-R11}
BX lr
.END
/*----------------------------------------------------------------------------
* end of file
*---------------------------------------------------------------------------*/


@@ -578,7 +578,576 @@ __STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
#elif (defined (__GNUC__)) /*------------------ GNU Compiler ---------------------*/
-//#error GNU Compiler support not implemented for Cortex-A
+/* GNU gcc specific functions */
#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_MON 0x16
#define MODE_ABT 0x17
#define MODE_HYP 0x1A
#define MODE_UND 0x1B
#define MODE_SYS 0x1F
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
{
__ASM volatile ("cpsie i");
}
/** \brief Disable IRQ Interrupts
This function disables IRQ interrupts by setting the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __disable_irq(void)
{
uint32_t result;
__ASM volatile ("mrs %0, cpsr" : "=r" (result));
__ASM volatile ("cpsid i");
return(result & 0x80);
}
/** \brief Get APSR Register
This function returns the content of the APSR Register.
\return APSR Register value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
{
#if 1
uint32_t result;
__ASM volatile ("mrs %0, apsr" : "=r" (result) );
return (result);
#else
register uint32_t __regAPSR __ASM("apsr");
return(__regAPSR);
#endif
}
/** \brief Get CPSR Register
This function returns the content of the CPSR Register.
\return CPSR Register value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CPSR(void)
{
#if 1
register uint32_t __regCPSR;
__ASM volatile ("mrs %0, cpsr" : "=r" (__regCPSR));
#else
register uint32_t __regCPSR __ASM("cpsr");
#endif
return(__regCPSR);
}
#if 0
/** \brief Set Stack Pointer
This function assigns the given value to the current stack pointer.
\param [in] topOfStack Stack Pointer value to set
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_SP(uint32_t topOfStack)
{
register uint32_t __regSP __ASM("sp");
__regSP = topOfStack;
}
#endif
/** \brief Get link register
This function returns the value of the link register
\return Value of link register
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_LR(void)
{
register uint32_t __reglr __ASM("lr");
return(__reglr);
}
#if 0
/** \brief Set link register
This function sets the value of the link register
\param [in] lr LR value to set
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_LR(uint32_t lr)
{
register uint32_t __reglr __ASM("lr");
__reglr = lr;
}
#endif
/** \brief Set Process Stack Pointer
This function assigns the given value to the USR/SYS Stack Pointer (PSP).
\param [in] topOfProcStack USR/SYS Stack Pointer value to set
*/
extern void __set_PSP(uint32_t topOfProcStack);
/** \brief Set User Mode
This function changes the processor state to User Mode
\param [in] topOfProcStack USR/SYS Stack Pointer value to set
*/
extern void __set_CPS_USR(void);
/** \brief Enable FIQ
This function enables FIQ interrupts by clearing the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
#define __enable_fault_irq __enable_fiq
/** \brief Disable FIQ
This function disables FIQ interrupts by setting the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
#define __disable_fault_irq __disable_fiq
/** \brief Get FPSCR
This function returns the current value of the Floating Point Status/Control register.
\return Floating Point Status/Control register value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
#if 1
uint32_t result;
__ASM volatile ("vmrs %0, fpscr" : "=r" (result) );
return (result);
#else
register uint32_t __regfpscr __ASM("fpscr");
return(__regfpscr);
#endif
#else
return(0);
#endif
}
/** \brief Set FPSCR
This function assigns the given value to the Floating Point Status/Control register.
\param [in] fpscr Floating Point Status/Control value to set
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
#if 1
__ASM volatile ("vmsr fpscr, %0" : : "r" (fpscr) );
#else
register uint32_t __regfpscr __ASM("fpscr");
__regfpscr = (fpscr);
#endif
#endif
}
/** \brief Get FPEXC
This function returns the current value of the Floating Point Exception Control register.
\return Floating Point Exception Control register value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
#if 1
uint32_t result;
__ASM volatile ("vmrs %0, fpexc" : "=r" (result));
return (result);
#else
register uint32_t __regfpexc __ASM("fpexc");
return(__regfpexc);
#endif
#else
return(0);
#endif
}
/** \brief Set FPEXC
This function assigns the given value to the Floating Point Exception Control register.
\param [in] fpscr Floating Point Exception Control value to set
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
#if 1
__ASM volatile ("vmsr fpexc, %0" : : "r" (fpexc));
#else
register uint32_t __regfpexc __ASM("fpexc");
__regfpexc = (fpexc);
#endif
#endif
}
/** \brief Get CPACR
This function returns the current value of the Coprocessor Access Control register.
\return Coprocessor Access Control register value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CPACR(void)
{
#if 1
register uint32_t __regCPACR;
__ASM volatile ("mrc p15, 0, %0, c1, c0, 2" : "=r" (__regCPACR));
#else
register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
#endif
return __regCPACR;
}
/** \brief Set CPACR
This function assigns the given value to the Coprocessor Access Control register.
\param [in] cpacr Coporcessor Acccess Control value to set
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CPACR(uint32_t cpacr)
{
#if 1
__ASM volatile ("mcr p15, 0, %0, c1, c0, 2" : : "r" (cpacr));
#else
register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
__regCPACR = cpacr;
#endif
__ISB();
}
/** \brief Get CBAR
This function returns the value of the Configuration Base Address register.
\return Configuration Base Address register value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CBAR() {
#if 1
register uint32_t __regCBAR;
__ASM volatile ("mrc p15, 4, %0, c15, c0, 0" : "=r" (__regCBAR));
#else
register uint32_t __regCBAR __ASM("cp15:4:c15:c0:0");
#endif
return(__regCBAR);
}
/** \brief Get TTBR0
This function returns the value of the Configuration Base Address register.
\return Translation Table Base Register 0 value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_TTBR0() {
#if 1
register uint32_t __regTTBR0;
__ASM volatile ("mrc p15, 0, %0, c2, c0, 0" : "=r" (__regTTBR0));
#else
register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
#endif
return(__regTTBR0);
}
/** \brief Set TTBR0
This function assigns the given value to the Coprocessor Access Control register.
\param [in] ttbr0 Translation Table Base Register 0 value to set
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
#if 1
__ASM volatile ("mcr p15, 0, %0, c2, c0, 0" : : "r" (ttbr0));
#else
register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
__regTTBR0 = ttbr0;
#endif
__ISB();
}
/** \brief Get DACR
This function returns the value of the Domain Access Control Register.
\return Domain Access Control Register value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_DACR() {
#if 1
register uint32_t __regDACR;
__ASM volatile ("mrc p15, 0, %0, c3, c0, 0" : "=r" (__regDACR));
#else
register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
#endif
return(__regDACR);
}
/** \brief Set DACR
This function assigns the given value to the Coprocessor Access Control register.
\param [in] dacr Domain Access Control Register value to set
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_DACR(uint32_t dacr) {
#if 1
__ASM volatile ("mcr p15, 0, %0, c3, c0, 0" : : "r" (dacr));
#else
register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
__regDACR = dacr;
#endif
__ISB();
}
/******************************** Cache and BTAC enable ****************************************************/
/** \brief Set SCTLR
This function assigns the given value to the System Control Register.
\param [in] sctlr System Control Register, value to set
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_SCTLR(uint32_t sctlr)
{
#if 1
__ASM volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr));
#else
register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
__regSCTLR = sctlr;
#endif
}
/** \brief Get SCTLR
This function returns the value of the System Control Register.
\return System Control Register value
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_SCTLR() {
#if 1
register uint32_t __regSCTLR;
__ASM volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (__regSCTLR));
#else
register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
#endif
return(__regSCTLR);
}
/** \brief Enable Caches
Enable Caches
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_caches(void) {
// Set I bit 12 to enable I Cache
// Set C bit 2 to enable D Cache
__set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
}
/** \brief Disable Caches
Disable Caches
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_caches(void) {
// Clear I bit 12 to disable I Cache
// Clear C bit 2 to disable D Cache
__set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
__ISB();
}
/** \brief Enable BTAC
Enable BTAC
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_btac(void) {
// Set Z bit 11 to enable branch prediction
__set_SCTLR( __get_SCTLR() | (1 << 11));
__ISB();
}
/** \brief Disable BTAC
Disable BTAC
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_btac(void) {
// Clear Z bit 11 to disable branch prediction
__set_SCTLR( __get_SCTLR() & ~(1 << 11));
}
/** \brief Enable MMU
Enable MMU
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_mmu(void) {
// Set M bit 0 to enable the MMU
// Set AFE bit to enable simplified access permissions model
// Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
__set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
__ISB();
}
/** \brief Enable MMU
Enable MMU
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_mmu(void) {
// Clear M bit 0 to disable the MMU
__set_SCTLR( __get_SCTLR() & ~1);
__ISB();
}
/******************************** TLB maintenance operations ************************************************/
/** \brief Invalidate the whole tlb
TLBIALL. Invalidate the whole tlb
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __ca9u_inv_tlb_all(void) {
#if 1
__ASM volatile ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0));
#else
register uint32_t __TLBIALL __ASM("cp15:0:c8:c7:0");
__TLBIALL = 0;
#endif
__DSB();
__ISB();
}
/******************************** BTB maintenance operations ************************************************/
/** \brief Invalidate entire branch predictor array
BPIALL. Branch Predictor Invalidate All.
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_btac(void) {
#if 1
__ASM volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0));
#else
register uint32_t __BPIALL __ASM("cp15:0:c7:c5:6");
__BPIALL = 0;
#endif
__DSB(); //ensure completion of the invalidation
__ISB(); //ensure instruction fetch path sees new state
}
/******************************** L1 cache operations ******************************************************/
/** \brief Invalidate the whole I$
ICIALLU. Instruction Cache Invalidate All to PoU
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_icache_all(void) {
#if 1
__ASM volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
#else
register uint32_t __ICIALLU __ASM("cp15:0:c7:c5:0");
__ICIALLU = 0;
#endif
__DSB(); //ensure completion of the invalidation
__ISB(); //ensure instruction fetch path sees new I cache state
}
/** \brief Clean D$ by MVA
DCCMVAC. Data cache clean by MVA to PoC
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_dcache_mva(void *va) {
#if 1
__ASM volatile ("mcr p15, 0, %0, c7, c10, 1" : : "r" ((uint32_t)va));
#else
register uint32_t __DCCMVAC __ASM("cp15:0:c7:c10:1");
__DCCMVAC = (uint32_t)va;
#endif
__DMB(); //ensure the ordering of data cache maintenance operations and their effects
}
/** \brief Invalidate D$ by MVA
DCIMVAC. Data cache invalidate by MVA to PoC
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_dcache_mva(void *va) {
#if 1
__ASM volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" ((uint32_t)va));
#else
register uint32_t __DCIMVAC __ASM("cp15:0:c7:c6:1");
__DCIMVAC = (uint32_t)va;
#endif
__DMB(); //ensure the ordering of data cache maintenance operations and their effects
}
/** \brief Clean and Invalidate D$ by MVA
DCCIMVAC. Data cache clean and invalidate by MVA to PoC
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
#if 1
__ASM volatile ("mcr p15, 0, %0, c7, c14, 1" : : "r" ((uint32_t)va));
#else
register uint32_t __DCCIMVAC __ASM("cp15:0:c7:c14:1");
__DCCIMVAC = (uint32_t)va;
#endif
__DMB(); //ensure the ordering of data cache maintenance operations and their effects
}
/** \brief
* Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.
*/
/** \brief __v7_all_cache - helper function
*/
extern void __v7_all_cache(uint32_t op);
/** \brief Invalidate the whole D$
DCISW. Invalidate by Set/Way
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_dcache_all(void) {
__v7_all_cache(0);
}
/** \brief Clean the whole D$
DCCSW. Clean by Set/Way
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_dcache_all(void) {
__v7_all_cache(1);
}
/** \brief Clean and invalidate the whole D$
DCCISW. Clean and Invalidate by Set/Way
*/
__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
__v7_all_cache(2);
}
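Taken together, the primitives above cover a typical Cortex-A9 MMU/cache bring-up. A minimal sketch of the usual ordering, assuming the translation tables, TTBR0 and domain access control have already been programmed elsewhere; demo_mmu_bringup is an illustrative name, not part of this header.
__attribute__( ( always_inline ) ) __STATIC_INLINE void demo_mmu_bringup(void) {
    __disable_mmu();        /* keep translation off while maintenance runs      */
    __ca9u_inv_tlb_all();   /* no stale translations may survive                */
    __v7_inv_icache_all();  /* I$ may hold code fetched under the old mapping   */
    __v7_inv_dcache_all();  /* likewise for the data/unified cache              */
    __v7_inv_btac();        /* and the branch predictor                         */
    __enable_mmu();         /* SCTLR: M = 1, AFE = 1, TRE = 0, A = 0            */
    __ISB();                /* later fetches use the new translation regime     */
}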
#include "core_ca_mmu.h"
#elif (defined (__TASKING__)) /*--------------- TASKING Compiler -----------------*/ #elif (defined (__TASKING__)) /*--------------- TASKING Compiler -----------------*/

View File

@ -19,7 +19,7 @@
/* ->Take measures about optimization problems of web compiler */ /* ->Take measures about optimization problems of web compiler */
/* Web compiler has problem that inlining code may not be generated correctly */ /* Web compiler has problem that inlining code may not be generated correctly */
/* when "-O3 -Otime" was specified. */ /* when "-O3 -Otime" was specified. */
#if defined(__arm__) && (__ARMCC_VERSION <= 5040027) #if defined(__CC_ARM) && (__ARMCC_VERSION <= 5040027)
#pragma Ospace #pragma Ospace
#endif #endif
/* <-Take measures about optimization problems of web compiler */ /* <-Take measures about optimization problems of web compiler */

View File

@ -27,7 +27,6 @@
#define NUM_OF_RX_DESCRIPTOR (16) #define NUM_OF_RX_DESCRIPTOR (16)
#define SIZE_OF_BUFFER (1600) /* Must be an integral multiple of 32 */ #define SIZE_OF_BUFFER (1600) /* Must be an integral multiple of 32 */
#define MAX_SEND_SIZE (1514) #define MAX_SEND_SIZE (1514)
#define BUFF_BOUNDARY_MSK (0x0000000F)
/* Ethernet Descriptor Value Define */ /* Ethernet Descriptor Value Define */
#define TD0_TFP_TOP_BOTTOM (0x30000000) #define TD0_TFP_TOP_BOTTOM (0x30000000)
#define TD0_TACT (0x80000000) #define TD0_TACT (0x80000000)
@ -105,14 +104,13 @@ typedef struct tag_edmac_recv_desc {
} edmac_recv_desc_t; } edmac_recv_desc_t;
/* memory */ /* memory */
#pragma arm section zidata="NC_BSS"
/* The whole transmit/receive descriptors (must be allocated in 16-byte boundaries) */ /* The whole transmit/receive descriptors (must be allocated in 16-byte boundaries) */
/* Transmit/receive buffers (must be allocated in 16-byte boundaries) */ /* Transmit/receive buffers (must be allocated in 16-byte boundaries) */
static uint8_t ehernet_nc_memory[(sizeof(edmac_send_desc_t) * NUM_OF_TX_DESCRIPTOR) + static uint8_t ehernet_nc_memory[(sizeof(edmac_send_desc_t) * NUM_OF_TX_DESCRIPTOR) +
(sizeof(edmac_recv_desc_t) * NUM_OF_RX_DESCRIPTOR) + (sizeof(edmac_recv_desc_t) * NUM_OF_RX_DESCRIPTOR) +
(NUM_OF_TX_DESCRIPTOR * SIZE_OF_BUFFER) + (NUM_OF_TX_DESCRIPTOR * SIZE_OF_BUFFER) +
(NUM_OF_RX_DESCRIPTOR * SIZE_OF_BUFFER) + BUFF_BOUNDARY_MSK]; (NUM_OF_RX_DESCRIPTOR * SIZE_OF_BUFFER)]
#pragma arm section zidata __attribute((section("NC_BSS"),aligned(16))); //16 bytes aligned!
static int32_t rx_read_offset; /* read offset */ static int32_t rx_read_offset; /* read offset */
static int32_t tx_wite_offset; /* write offset */ static int32_t tx_wite_offset; /* write offset */
static uint32_t send_top_index; static uint32_t send_top_index;
@ -489,7 +487,7 @@ static void lan_desc_create(void) {
uint8_t *p_memory_top; uint8_t *p_memory_top;
(void)memset((void *)ehernet_nc_memory, 0, sizeof(ehernet_nc_memory)); (void)memset((void *)ehernet_nc_memory, 0, sizeof(ehernet_nc_memory));
p_memory_top = (uint8_t *)(((uint32_t)ehernet_nc_memory + BUFF_BOUNDARY_MSK) & ~BUFF_BOUNDARY_MSK); p_memory_top = ehernet_nc_memory;
/* Descriptor area configuration */ /* Descriptor area configuration */
p_eth_desc_dsend = (edmac_send_desc_t *)p_memory_top; p_eth_desc_dsend = (edmac_send_desc_t *)p_memory_top;
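The attribute form above is what makes the old run-time rounding with BUFF_BOUNDARY_MSK unnecessary: the toolchain now guarantees both the section placement and the 16-byte alignment. A stand-alone sketch of the same idiom, assuming the linker script provides an uncached NC_BSS output section; demo_dma_area and demo_memory_top are illustrative names.
#include <stdint.h>
/* Zero-initialised, 16-byte aligned, placed in the uncached NC_BSS section. */
static uint8_t demo_dma_area[1600] __attribute__((section("NC_BSS"), aligned(16)));
/* Because the array itself is aligned, descriptor setup can start at its
 * first byte instead of rounding the address up at run time. */
static inline uint8_t *demo_memory_top(void) {
    return demo_dma_area;
}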

View File

@ -17,4 +17,4 @@ typedef struct tag_ethernet_cfg {
extern int ethernetext_init(ethernet_cfg_t *p_ethcfg); extern int ethernetext_init(ethernet_cfg_t *p_ethcfg);
extern void ethernetext_start_stop(int32_t mode); extern void ethernetext_start_stop(int32_t mode);
extern int ethernetext_chk_link_mode(void); extern int ethernetext_chk_link_mode(void);
extern void ethernetext_set_link_mode(int link); extern void ethernetext_set_link_mode(int32_t link);

View File

@ -199,12 +199,13 @@ void gpio_irq_set(gpio_irq_t *obj, gpio_irq_event event, uint32_t enable) {
GIC_DisableIRQ((IRQn_Type)(nIRQn_h+obj->ch)); GIC_DisableIRQ((IRQn_Type)(nIRQn_h+obj->ch));
/* Clear Interrupt flags */ /* Clear Interrupt flags */
INTCIRQRR &= ~(1 << obj->ch); INTCIRQRR &= ~(1 << obj->ch);
INTCICR1 = work_icr_val;
} else if (obj->int_enable == 1) { } else if (obj->int_enable == 1) {
INTCICR1 = work_icr_val;
GIC_EnableIRQ((IRQn_Type)(nIRQn_h + obj->ch)); GIC_EnableIRQ((IRQn_Type)(nIRQn_h + obj->ch));
} else { } else {
/* Do Nothing */ INTCICR1 = work_icr_val;
} }
INTCICR1 = work_icr_val;
} }
void gpio_irq_enable(gpio_irq_t *obj) { void gpio_irq_enable(gpio_irq_t *obj) {

View File

@ -247,7 +247,7 @@ inline int i2c_start(i2c_t *obj) {
return 0; return 0;
} }
inline int i2c_restart(i2c_t *obj) { static inline int i2c_restart(i2c_t *obj) {
/* SR2.START = 0 */ /* SR2.START = 0 */
REG(SR2.UINT32) &= ~SR2_START; REG(SR2.UINT32) &= ~SR2_START;
/* ReStart condition */ /* ReStart condition */

View File

@ -235,8 +235,6 @@ void serial_init(serial_t *obj, PinName tx, PinName rx) {
b0 SPB2DT - Serial port break data : High-level */ b0 SPB2DT - Serial port break data : High-level */
obj->uart->SCSPTR = 0x0003u; // SPB2IO = 1, SPB2DT = 1 obj->uart->SCSPTR = 0x0003u; // SPB2IO = 1, SPB2DT = 1
obj->uart->SCSCR = 0x00F0;
/* ---- Line status register (SCLSR) setting ---- /* ---- Line status register (SCLSR) setting ----
b0 ORER - Overrun error detect : clear */ b0 ORER - Overrun error detect : clear */
@ -277,6 +275,10 @@ void serial_init(serial_t *obj, PinName tx, PinName rx) {
uart_data[obj->index].sw_rts.pin = NC; uart_data[obj->index].sw_rts.pin = NC;
uart_data[obj->index].sw_cts.pin = NC; uart_data[obj->index].sw_cts.pin = NC;
/* ---- Serial control register (SCSCR) setting ---- */
/* Setting the TE and RE bits enables the TxD and RxD pins to be used. */
obj->uart->SCSCR = 0x00F0;
is_stdio_uart = (uart == STDIO_UART) ? (1) : (0); is_stdio_uart = (uart == STDIO_UART) ? (1) : (0);
if (is_stdio_uart) { if (is_stdio_uart) {

View File

@ -30,6 +30,7 @@ int us_ticker_inited = 0;
static double count_clock = 0; static double count_clock = 0;
static uint32_t last_read = 0; static uint32_t last_read = 0;
static uint32_t wrap_arround = 0; static uint32_t wrap_arround = 0;
static uint64_t ticker_us_last64 = 0;
void us_ticker_interrupt(void) { void us_ticker_interrupt(void) {
us_ticker_irq_handler(); us_ticker_irq_handler();
@ -61,69 +62,67 @@ void us_ticker_init(void) {
GIC_EnableIRQ(US_TICKER_TIMER_IRQn); GIC_EnableIRQ(US_TICKER_TIMER_IRQn);
} }
uint64_t us_ticker_read64() { static uint64_t ticker_read_counter64(void) {
uint32_t val; uint32_t cnt_val;
volatile uint64_t val64; uint64_t cnt_val64;
int check_irq_masked;
check_irq_masked = __disable_irq();
if (!us_ticker_inited) if (!us_ticker_inited)
us_ticker_init(); us_ticker_init();
/* read counter */ /* read counter */
val = OSTM1CNT; cnt_val = OSTM1CNT;
if ( last_read > val ) { if (last_read > cnt_val) {
wrap_arround++; wrap_arround++;
} }
last_read = val; last_read = cnt_val;
val64 = ((uint64_t)wrap_arround << 32) + val; cnt_val64 = ((uint64_t)wrap_arround << 32) + cnt_val;
/* clock to us */ return cnt_val64;
val64 = val64 / count_clock; }
uint32_t us_ticker_read() {
uint64_t cnt_val64;
uint64_t us_val64;
int check_irq_masked;
check_irq_masked = __disable_irq();
cnt_val64 = ticker_read_counter64();
us_val64 = (cnt_val64 / count_clock);
ticker_us_last64 = us_val64;
if (!check_irq_masked) { if (!check_irq_masked) {
__enable_irq(); __enable_irq();
} }
return val64; /* clock to us */
} return (uint32_t)us_val64;
uint32_t us_ticker_read() {
return (uint32_t)us_ticker_read64();
} }
void us_ticker_set_interrupt(timestamp_t timestamp) { void us_ticker_set_interrupt(timestamp_t timestamp) {
// set match value // set match value
volatile uint64_t set_cmp_val = 0; uint64_t timestamp64;
uint64_t timestamp_tmp; uint64_t set_cmp_val64;
int64_t timestamp_req; volatile uint32_t set_cmp_val;
int64_t timestamp_comp; uint64_t count_val_64;
uint64_t timestamp_now = us_ticker_read64();
/* calc compare match timestamp */ /* calc compare match timestamp */
set_cmp_val = (timestamp_now & 0xFFFFFFFF00000000) + timestamp; timestamp64 = (ticker_us_last64 & 0xFFFFFFFF00000000) + timestamp;
if (timestamp < (ticker_us_last64 & 0x00000000FFFFFFFF)) {
timestamp_tmp = (uint64_t)timestamp; /* This event is wrap arround */
timestamp_req = (int64_t)timestamp_tmp; timestamp64 += 0x100000000;
timestamp_tmp = (uint64_t)(timestamp_now & 0x00000000FFFFFFFF);
timestamp_comp = (int64_t)timestamp_tmp;
if (timestamp_req <= timestamp_comp + 1) {
if (((timestamp_req - timestamp_comp) <= 1) && ((timestamp_req - timestamp_comp) >= -10)) {
/* This event was in the past */
us_ticker_irq_handler();
return;
} else {
/* This event is wrap arround */
set_cmp_val += 0x100000000;
}
} }
/* calc compare match timestamp */ /* calc compare match timestamp */
set_cmp_val = set_cmp_val * count_clock; set_cmp_val64 = timestamp64 * count_clock;
OSTM1CMP = (uint32_t)(set_cmp_val & 0xffffffff); set_cmp_val = (uint32_t)(set_cmp_val64 & 0x00000000FFFFFFFF);
count_val_64 = ticker_read_counter64();
if (set_cmp_val64 <= (count_val_64 + 500)) {
GIC_SetPendingIRQ(US_TICKER_TIMER_IRQn);
GIC_EnableIRQ(US_TICKER_TIMER_IRQn);
return;
}
OSTM1CMP = set_cmp_val;
GIC_EnableIRQ(US_TICKER_TIMER_IRQn); GIC_EnableIRQ(US_TICKER_TIMER_IRQn);
} }
@ -132,6 +131,5 @@ void us_ticker_disable_interrupt(void) {
} }
void us_ticker_clear_interrupt(void) { void us_ticker_clear_interrupt(void) {
/* There are no Flags of OSTM1 to clear here */ GIC_ClearPendingIRQ(US_TICKER_TIMER_IRQn);
/* Do Nothing */
} }
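The reworked ticker splits the work in two: ticker_read_counter64() extends the 32-bit OSTM1 count to 64 bits by counting wrap-arounds, and us_ticker_read() converts that to microseconds (count / count_clock, where count_clock holds timer ticks per microsecond) with interrupts masked. A stand-alone sketch of the 64-bit extension, with demo_* names standing in for the driver's statics.
#include <stdint.h>
static uint32_t demo_last_read;   /* previous raw counter value      */
static uint32_t demo_wrap_count;  /* number of observed wrap-arounds */
/* Extend a free-running 32-bit up-counter to 64 bits. Call with interrupts
 * masked (as us_ticker_read() above does) and at least once per counter
 * period, otherwise a wrap can be missed. */
static uint64_t demo_read_counter64(uint32_t hw_count) {
    if (demo_last_read > hw_count) {    /* counter moved backwards: it wrapped */
        demo_wrap_count++;
    }
    demo_last_read = hw_count;
    return ((uint64_t)demo_wrap_count << 32) | hw_count;
}
us_ticker_set_interrupt() then rebuilds the absolute 64-bit deadline from ticker_us_last64 and, when the deadline is already reached or falls within roughly 500 counter ticks, pends the timer IRQ at the GIC instead of programming a compare value the counter may already have passed.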

View File

@ -258,40 +258,6 @@ void _main_init (void) {
#pragma pop #pragma pop
#endif #endif
#else #else
#if 0
//#ifdef __MBED_CMSIS_RTOS_CA9
__asm void __rt_entry (void) {
IMPORT __user_setup_stackheap
IMPORT __rt_lib_init
IMPORT os_thread_def_main
IMPORT osKernelInitialize
IMPORT osKernelStart
IMPORT osThreadCreate
IMPORT InterruptHandlerRegister
IMPORT PendSV_Handler
IMPORT OS_Tick_Handler
IMPORT exit
BL __user_setup_stackheap
MOV R1,R2
BL __rt_lib_init
BL osKernelInitialize
LDR R0,=os_thread_def_main
MOVS R1,#0
BL osThreadCreate
BL osKernelStart
MOVS R0,#0
LDR R1,=PendSV_Handler
BL InterruptHandlerRegister
MOVS R0,#134
LDR R1,=OS_Tick_Handler
BL InterruptHandlerRegister
BL exit
ALIGN
}
#else
__asm void __rt_entry (void) { __asm void __rt_entry (void) {
IMPORT __user_setup_stackheap IMPORT __user_setup_stackheap
@ -315,7 +281,6 @@ __asm void __rt_entry (void) {
ALIGN ALIGN
} }
#endif #endif
#endif
#elif defined (__GNUC__) #elif defined (__GNUC__)
@ -375,7 +340,7 @@ __attribute ((noreturn)) void __cs3_start_c (void){
__attribute__((naked)) void software_init_hook (void) { __attribute__((naked)) void software_init_hook (void) {
__asm ( __asm (
".syntax unified\n" ".syntax unified\n"
".thumb\n" ".arm\n"
"movs r0,#0\n" "movs r0,#0\n"
"movs r1,#0\n" "movs r1,#0\n"
"mov r4,r0\n" "mov r4,r0\n"

View File

@ -0,0 +1,474 @@
/*----------------------------------------------------------------------------
* RL-ARM - RTX
*----------------------------------------------------------------------------
* Name: HAL_CA9.c
* Purpose: Hardware Abstraction Layer for Cortex-A9
* Rev.: 3 Sept 2013
*----------------------------------------------------------------------------
*
* Copyright (c) 2012 - 2013 ARM Limited
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*---------------------------------------------------------------------------*/
.global rt_set_PSP
.global rt_get_PSP
.global _alloc_box
.global _free_box
.global PendSV_Handler
.global OS_Tick_Handler
.EQU CPSR_T_BIT, 0x20
.EQU CPSR_I_BIT, 0x80
.EQU CPSR_F_BIT, 0x40
.EQU MODE_USR, 0x10
.EQU MODE_FIQ, 0x11
.EQU MODE_IRQ, 0x12
.EQU MODE_SVC, 0x13
.EQU MODE_ABT, 0x17
.EQU MODE_UND, 0x1B
.EQU MODE_SYS, 0x1F
.EQU TCB_TID, 3 /* 'task id' offset */
.EQU TCB_STACKF, 32 /* 'stack_frame' offset */
.EQU TCB_TSTACK, 36 /* 'tsk_stack' offset */
.extern rt_alloc_box
.extern os_tsk
.extern GICInterface_BASE
.extern rt_pop_req
.extern os_tick_irqack
.extern rt_systick
/*----------------------------------------------------------------------------
* Functions
*---------------------------------------------------------------------------*/
.text
@ For A-class, set USR/SYS stack
@ __asm void rt_set_PSP (U32 stack) {
rt_set_PSP:
.arm
MRS R1, CPSR
CPS #MODE_SYS @no effect in USR mode
ISB
MOV SP, R0
MSR CPSR_c, R1 @no effect in USR mode
ISB
BX LR
@ }
@ For A-class, get USR/SYS stack
@ __asm U32 rt_get_PSP (void) {
rt_get_PSP:
.arm
MRS R1, CPSR
CPS #MODE_SYS @no effect in USR mode
ISB
MOV R0, SP
MSR CPSR_c, R1 @no effect in USR mode
ISB
BX LR
@ }
/*--------------------------- _alloc_box ------------------------------------*/
@ __asm void *_alloc_box (void *box_mem) {
_alloc_box:
/* Function wrapper for Unprivileged/Privileged mode. */
.arm
LDR R12,=rt_alloc_box @ __cpp(rt_alloc_box)
MRS R2, CPSR
LSLS R2, #28
BXNE R12
SVC 0
BX LR
@ }
/*--------------------------- _free_box -------------------------------------*/
@ __asm int _free_box (void *box_mem, void *box) {
_free_box:
/* Function wrapper for Unprivileged/Privileged mode. */
.arm
LDR R12,=rt_free_box @ __cpp(rt_free_box)
MRS R2, CPSR
LSLS R2, #28
BXNE R12
SVC 0
BX LR
@ }
/*-------------------------- SVC_Handler -----------------------------------*/
@ #pragma push
@ #pragma arm
@ __asm void SVC_Handler (void) {
.type SVC_Handler, %function
.global SVC_Handler
SVC_Handler:
@ PRESERVE8
.arm
.extern rt_tsk_lock
.extern rt_tsk_unlock
.extern SVC_Count
.extern SVC_Table
.extern rt_stk_check
.extern FPUEnable
.EQU Mode_SVC, 0x13
SRSDB SP!, #Mode_SVC @ Push LR_SVC and SPSR_SVC onto SVC mode stack
PUSH {R4} @ Push R4 so we can use it as a temp
MRS R4,SPSR @ Get SPSR
TST R4,#CPSR_T_BIT @ Check Thumb Bit
LDRNEH R4,[LR,#-2] @ Thumb: Load Halfword
BICNE R4,R4,#0xFF00 @ Extract SVC Number
LDREQ R4,[LR,#-4] @ ARM: Load Word
BICEQ R4,R4,#0xFF000000 @ Extract SVC Number
/* Lock out systick and re-enable interrupts */
PUSH {R0-R3,R12,LR}
AND R12, SP, #4 @ Ensure stack is 8-byte aligned
SUB SP, SP, R12 @ Adjust stack
PUSH {R12, LR} @ Store stack adjustment and dummy LR to SVC stack
BLX rt_tsk_lock
CPSIE i
POP {R12, LR} @ Get stack adjustment & discard dummy LR
ADD SP, SP, R12 @ Unadjust stack
POP {R0-R3,R12,LR}
CMP R4,#0
BNE SVC_User
MRS R4,SPSR
PUSH {R4} @ Push R4 so we can use it as a temp
AND R4, SP, #4 @ Ensure stack is 8-byte aligned
SUB SP, SP, R4 @ Adjust stack
PUSH {R4, LR} @ Store stack adjustment and dummy LR
BLX R12
POP {R4, LR} @ Get stack adjustment & discard dummy LR
ADD SP, SP, R4 @ Unadjust stack
POP {R4} @ Restore R4
MSR SPSR_cxsf,R4
/* Here we will be in SVC mode (even if coming in from PendSV_Handler or OS_Tick_Handler) */
Sys_Switch:
LDR LR,=os_tsk @ __cpp(&os_tsk)
LDM LR,{R4,LR} @ os_tsk.run, os_tsk.new
CMP R4,LR
BNE switching
PUSH {R0-R3,R12,LR}
AND R12, SP, #4 @ Ensure stack is 8-byte aligned
SUB SP, SP, R12 @ Adjust stack
PUSH {R12, LR} @ Store stack adjustment and dummy LR to SVC stack
CPSID i
BLX rt_tsk_unlock
POP {R12, LR} @ Get stack adjustment & discard dummy LR
ADD SP, SP, R12 @ Unadjust stack
POP {R0-R3,R12,LR}
POP {R4}
RFEFD SP! @ Return from exception, no task switch
switching:
CLREX
CMP R4,#0
ADDEQ SP,SP,#12 @ Original R4, LR & SPSR do not need to be popped when we are paging in a different task
BEQ SVC_Next @ Runtask deleted?
PUSH {R8-R11} @ R4 and LR already stacked
MOV R10,R4 @ Preserve os_tsk.run
MOV R11,LR @ Preserve os_tsk.new
ADD R8,SP,#16 @ Unstack R4,LR
LDMIA R8,{R4,LR}
SUB SP,SP,#4 @ Make space on the stack for the next instn
STMIA SP,{SP}^ @ Put User SP onto stack
POP {R8} @ Pop User SP into R8
MRS R9,SPSR
STMDB R8!,{R9} @ User CPSR
STMDB R8!,{LR} @ User PC
STMDB R8,{LR}^ @ User LR
SUB R8,R8,#4 @ No writeback for store of User LR
STMDB R8!,{R0-R3,R12} @ User R0-R3,R12
MOV R3,R10 @ os_tsk.run
MOV LR,R11 @ os_tsk.new
POP {R9-R12}
ADD SP,SP,#12 @ Fix up SP for unstack of R4, LR & SPSR
STMDB R8!,{R4-R7,R9-R12} @ User R4-R11
@ If applicable, stack VFP state
MRC p15,0,R1,c1,c0,2 @ VFP/NEON access enabled? (CPACR)
AND R2,R1,#0x00F00000
CMP R2,#0x00F00000
BNE no_outgoing_vfp
VMRS R2,FPSCR
STMDB R8!,{R2,R4} @ Push FPSCR, maintain 8-byte alignment
VSTMDB R8!,{S0-S31}
LDRB R2,[R3,#TCB_STACKF] @ Record in TCB that VFP state is stacked
ORR R2,#2
STRB R2,[R3,#TCB_STACKF]
no_outgoing_vfp:
STR R8,[R3,#TCB_TSTACK]
MOV R4,LR
PUSH {R4} @ Push R4 so we can use it as a temp
AND R4, SP, #4 @ Ensure stack is 8-byte aligned
SUB SP, SP, R4 @ Adjust stack
PUSH {R4, LR} @ Store stack adjustment and dummy LR to SVC stack
BLX rt_stk_check
POP {R4, LR} @ Get stack adjustment & discard dummy LR
ADD SP, SP, R4 @ Unadjust stack
POP {R4} @ Restore R4
MOV LR,R4
SVC_Next: @ R4 == os_tsk.run, LR == os_tsk.new, R0-R3, R5-R12 corruptible
LDR R1,=os_tsk @ __cpp(&os_tsk), os_tsk.run = os_tsk.new
STR LR,[R1]
LDRB R1,[LR,#TCB_TID] @ os_tsk.run->task_id
LSL R1,#8 @ Store PROCID
MCR p15,0,R1,c13,c0,1 @ Write CONTEXTIDR
LDR R0,[LR,#TCB_TSTACK] @ os_tsk.run->tsk_stack
@ Does incoming task have VFP state in stack?
LDRB R3,[LR,#TCB_STACKF]
TST R3,#0x2
MRC p15,0,R1,c1,c0,2 @ Read CPACR
ANDEQ R1,R1,#0xFF0FFFFF @ Disable VFP access if incoming task does not have stacked VFP state
ORRNE R1,R1,#0x00F00000 @ Enable VFP access if incoming task does have stacked VFP state
MCR p15,0,R1,c1,c0,2 @ Write CPACR
BEQ no_incoming_vfp
ISB @ We only need the sync if we enabled, otherwise we will context switch before next VFP instruction anyway
VLDMIA R0!,{S0-S31}
LDR R2,[R0]
VMSR FPSCR,R2
ADD R0,R0,#8
no_incoming_vfp:
LDR R1,[R0,#60] @ Restore User CPSR
MSR SPSR_cxsf,R1
LDMIA R0!,{R4-R11} @ Restore User R4-R11
ADD R0,R0,#4 @ Restore User R1-R3,R12
LDMIA R0!,{R1-R3,R12}
LDMIA R0,{LR}^ @ Restore User LR
ADD R0,R0,#4 @ No writeback for load to user LR
LDMIA R0!,{LR} @ Restore User PC
ADD R0,R0,#4 @ Correct User SP for unstacked user CPSR
PUSH {R0} @ Push R0 onto stack
LDMIA SP,{SP}^ @ Get R0 off stack into User SP
ADD SP,SP,#4 @ Put SP back
LDR R0,[R0,#-32] @ Restore R0
PUSH {R0-R3,R12,LR}
AND R12, SP, #4 @ Ensure stack is 8-byte aligned
SUB SP, SP, R12 @ Adjust stack
PUSH {R12, LR} @ Store stack adjustment and dummy LR to SVC stack
CPSID i
BLX rt_tsk_unlock
POP {R12, LR} @ Get stack adjustment & discard dummy LR
ADD SP, SP, R12 @ Unadjust stack
POP {R0-R3,R12,LR}
MOVS PC,LR @ Return from exception
/*------------------- User SVC -------------------------------*/
SVC_User:
LDR R12,=SVC_Count
LDR R12,[R12]
CMP R4,R12 @ Check for overflow
BHI SVC_Done
LDR R12,=SVC_Table-4
LDR R12,[R12,R4,LSL #2] @ Load SVC Function Address
MRS R4,SPSR @ Save SPSR
PUSH {R4} @ Push R4 so we can use it as a temp
AND R4, SP, #4 @ Ensure stack is 8-byte aligned
SUB SP, SP, R4 @ Adjust stack
PUSH {R4, LR} @ Store stack adjustment and dummy LR
BLX R12 @ Call SVC Function
POP {R4, LR} @ Get stack adjustment & discard dummy LR
ADD SP, SP, R4 @ Unadjust stack
POP {R4} @ Restore R4
MSR SPSR_cxsf,R4 @ Restore SPSR
SVC_Done:
PUSH {R0-R3,R12,LR}
PUSH {R4} @ Push R4 so we can use it as a temp
AND R4, SP, #4 @ Ensure stack is 8-byte aligned
SUB SP, SP, R4 @ Adjust stack
PUSH {R4, LR} @ Store stack adjustment and dummy LR
CPSID i
BLX rt_tsk_unlock
POP {R4, LR} @ Get stack adjustment & discard dummy LR
ADD SP, SP, R4 @ Unadjust stack
POP {R4} @ Restore R4
POP {R0-R3,R12,LR}
POP {R4}
RFEFD SP! @ Return from exception
@ }
@ #pragma pop
@ #pragma push
@ #pragma arm
@ __asm void PendSV_Handler (U32 IRQn) {
PendSV_Handler:
.arm
.extern rt_tsk_lock
.extern IRQNestLevel
ADD SP,SP,#8 @ fix up stack pointer (R0 has been pushed and will never be popped, R1 was pushed for stack alignment)
@ Disable systick interrupts, then write EOIR. We want interrupts disabled before we enter the context switcher.
PUSH {R0, R1}
BLX rt_tsk_lock
POP {R0, R1}
LDR R1, =GICInterface_BASE @ __cpp(&GICInterface_BASE)
LDR R1, [R1, #0]
STR R0, [R1, #0x10]
LDR R0, =IRQNestLevel @ Get address of nesting counter
LDR R1, [R0]
SUB R1, R1, #1 @ Decrement nesting counter
STR R1, [R0]
BLX rt_pop_req @ __cpp(rt_pop_req)
POP {R1, LR} @ Get stack adjustment & discard dummy LR
ADD SP, SP, R1 @ Unadjust stack
LDR R0,[SP,#24]
MSR SPSR_cxsf,R0
POP {R0-R3,R12} @ Leave SPSR & LR on the stack
PUSH {R4}
B Sys_Switch
@ }
@ #pragma pop
@ #pragma push
@ #pragma arm
@ __asm void OS_Tick_Handler (U32 IRQn) {
OS_Tick_Handler:
.arm
ADD SP,SP,#8 @ fix up stack pointer (R0 has been pushed and will never be popped, R1 was pushed for stack alignment)
PUSH {R0, R1}
BLX rt_tsk_lock
POP {R0, R1}
LDR R1, =GICInterface_BASE @ __cpp(&GICInterface_BASE)
LDR R1, [R1, #0]
STR R0, [R1, #0x10]
LDR R0, =IRQNestLevel @ Get address of nesting counter
LDR R1, [R0]
SUB R1, R1, #1 @ Decrement nesting counter
STR R1, [R0]
BLX os_tick_irqack @ __cpp(os_tick_irqack)
BLX rt_systick @ __cpp(rt_systick)
POP {R1, LR} @ Get stack adjustment & discard dummy LR
ADD SP, SP, R1 @ Unadjust stack
LDR R0,[SP,#24]
MSR SPSR_cxsf,R0
POP {R0-R3,R12} @ Leave SPSR & LR on the stack
PUSH {R4}
B Sys_Switch
@ }
@ #pragma pop
.global __set_PSP
@ __STATIC_ASM void __set_PSP(uint32_t topOfProcStack)
@ {
__set_PSP:
@ PRESERVE8
.arm
BIC R0, R0, #7 @ensure stack is 8-byte aligned
MRS R1, CPSR
CPS #MODE_SYS @no effect in USR mode
MOV SP, R0
MSR CPSR_c, R1 @no effect in USR mode
ISB
BX LR
@ }
.global __set_CPS_USR
@ __STATIC_ASM void __set_CPS_USR(void)
@ {
__set_CPS_USR:
.arm
CPS #MODE_USR
BX LR
@ }
.END
/*----------------------------------------------------------------------------
* end of file
*---------------------------------------------------------------------------*/

View File

@ -0,0 +1,60 @@
/*----------------------------------------------------------------------------
* RL-ARM - RTX
*----------------------------------------------------------------------------
* Name: SVC_TABLE.S
* Purpose: Pre-defined SVC Table for Cortex-M
* Rev.: V4.70
*----------------------------------------------------------------------------
*
* Copyright (c) 1999-2009 KEIL, 2009-2013 ARM Germany GmbH
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*---------------------------------------------------------------------------*/
.section SVC_TABLE @, CODE, READONLY
.align 5
.global SVC_Count
.EQU SVC_Cnt, (SVC_End-SVC_Table)/4
SVC_Count:
.word SVC_Cnt
@ Import user SVC functions here.
@ .extern __SVC_1
.global SVC_Table
SVC_Table:
@ Insert user SVC functions here. SVC 0 used by RTL Kernel.
@ .word __SVC_1 @ InitMemorySubsystem
@SVC_End
SVC_End:
.END
/*----------------------------------------------------------------------------
* end of file
*---------------------------------------------------------------------------*/

View File

@ -145,11 +145,63 @@ typedef uint32_t __attribute__((vector_size(16))) ret128;
#define RET_pointer __r0 #define RET_pointer __r0
#define RET_int32_t __r0 #define RET_int32_t __r0
#define RET_uint32_t __r0
#define RET_osStatus __r0 #define RET_osStatus __r0
#define RET_osPriority __r0 #define RET_osPriority __r0
#define RET_osEvent {(osStatus)__r0, {(uint32_t)__r1}, {(void *)__r2}} #define RET_osEvent {(osStatus)__r0, {(uint32_t)__r1}, {(void *)__r2}}
#define RET_osCallback {(void *)__r0, (void *)__r1} #define RET_osCallback {(void *)__r0, (void *)__r1}
#if defined (__ARM_PCS_VFP)
#define osEvent_type void
#define osEvent_ret_status { __asm ("MOV r0, %0;" \
: /* no outputs */ \
: "r"(ret.status) \
: "r0" \
); \
}
#define osEvent_ret_value { __asm ("MOV r1, %0;" \
"MOV r0, %1;" \
: /* no outputs */ \
: "r"(ret.value.v), \
"r"(ret.status) \
: "r0", "r1" \
); \
}
#define osEvent_ret_msg { __asm ("MOV r2, %0;" \
"MOV r1, %1;" \
"MOV r0, %2;" \
: /* no outputs */ \
: "r"(ret.def.message_id), \
"r"(ret.value.v), \
"r"(ret.status) \
: "r0", "r1" , "r2" \
); \
}
#define osEvent_ret_mail { __asm ("MOV r2, %0;" \
"MOV r1, %1;" \
"MOV r0, %2;" \
: /* no outputs */ \
: "r"(ret.def.mail_id), \
"r"(ret.value.v), \
"r"(ret.status) \
: "r0", "r1" , "r2" \
); \
}
#define osCallback_type void
#define osCallback_ret { __asm ("MOV r1, %0;" \
"MOV r0, %1;" \
: /* no outputs */ \
: "r"(ret.arg), \
"r"(ret.fp) \
: "r0", "r1" \
); \
}
#else /* defined (__ARM_PCS_VFP) */
#define osEvent_type ret128 #define osEvent_type ret128
#define osEvent_ret_status (ret128){ret.status} #define osEvent_ret_status (ret128){ret.status}
#define osEvent_ret_value (ret128){ret.status, ret.value.v} #define osEvent_ret_value (ret128){ret.status, ret.value.v}
@ -159,6 +211,8 @@ typedef uint32_t __attribute__((vector_size(16))) ret128;
#define osCallback_type ret64 #define osCallback_type ret64
#define osCallback_ret (ret64) {(uint32_t)ret.fp, (uint32_t)ret.arg} #define osCallback_ret (ret64) {(uint32_t)ret.fp, (uint32_t)ret.arg}
#endif /* defined (__ARM_PCS_VFP) */
#define SVC_ArgN(n) \ #define SVC_ArgN(n) \
register int __r##n __asm("r"#n); register int __r##n __asm("r"#n);
@ -808,14 +862,24 @@ os_InRegs osEvent_type svcWait (uint32_t millisec) {
if (millisec == 0) { if (millisec == 0) {
ret.status = osOK; ret.status = osOK;
#if defined (__GNUC__) && defined (__ARM_PCS_VFP)
osEvent_ret_status;
return;
#else
return osEvent_ret_status; return osEvent_ret_status;
#endif
} }
/* To Do: osEventSignal, osEventMessage, osEventMail */ /* To Do: osEventSignal, osEventMessage, osEventMail */
rt_dly_wait(rt_ms2tick(millisec)); rt_dly_wait(rt_ms2tick(millisec));
ret.status = osEventTimeout; ret.status = osEventTimeout;
#if defined (__GNUC__) && defined (__ARM_PCS_VFP)
osEvent_ret_status;
return;
#else
return osEvent_ret_status; return osEvent_ret_status;
#endif
} }
#endif #endif
@ -1046,13 +1110,23 @@ os_InRegs osCallback_type svcTimerCall (osTimerId timer_id) {
if (pt == NULL) { if (pt == NULL) {
ret.fp = NULL; ret.fp = NULL;
ret.arg = NULL; ret.arg = NULL;
#if defined (__GNUC__) && defined (__ARM_PCS_VFP)
osCallback_ret;
return;
#else
return osCallback_ret; return osCallback_ret;
#endif
} }
ret.fp = (void *)pt->timer->ptimer; ret.fp = (void *)pt->timer->ptimer;
ret.arg = pt->arg; ret.arg = pt->arg;
#if defined (__GNUC__) && defined (__ARM_PCS_VFP)
osCallback_ret;
return;
#else
return osCallback_ret; return osCallback_ret;
#endif
} }
static __INLINE osStatus isrMessagePut (osMessageQId queue_id, uint32_t info, uint32_t millisec); static __INLINE osStatus isrMessagePut (osMessageQId queue_id, uint32_t info, uint32_t millisec);
@ -1195,7 +1269,12 @@ os_InRegs osEvent_type svcSignalWait (int32_t signals, uint32_t millisec) {
if (signals & (0xFFFFFFFF << osFeature_Signals)) { if (signals & (0xFFFFFFFF << osFeature_Signals)) {
ret.status = osErrorValue; ret.status = osErrorValue;
#if defined (__GNUC__) && defined (__ARM_PCS_VFP)
osEvent_ret_status;
return;
#else
return osEvent_ret_status; return osEvent_ret_status;
#endif
} }
if (signals != 0) { // Wait for all specified signals if (signals != 0) { // Wait for all specified signals
@ -1212,7 +1291,12 @@ os_InRegs osEvent_type svcSignalWait (int32_t signals, uint32_t millisec) {
ret.value.signals = 0; ret.value.signals = 0;
} }
#if defined (__GNUC__) && defined (__ARM_PCS_VFP)
osEvent_ret_value;
return;
#else
return osEvent_ret_value; return osEvent_ret_value;
#endif
} }
@ -1694,24 +1778,44 @@ os_InRegs osEvent_type svcMessageGet (osMessageQId queue_id, uint32_t millisec)
if (queue_id == NULL) { if (queue_id == NULL) {
ret.status = osErrorParameter; ret.status = osErrorParameter;
#if defined (__GNUC__) && defined (__ARM_PCS_VFP)
osEvent_ret_status;
return;
#else
return osEvent_ret_status; return osEvent_ret_status;
#endif
} }
if (((P_MCB)queue_id)->cb_type != MCB) { if (((P_MCB)queue_id)->cb_type != MCB) {
ret.status = osErrorParameter; ret.status = osErrorParameter;
#if defined (__GNUC__) && defined (__ARM_PCS_VFP)
osEvent_ret_status;
return;
#else
return osEvent_ret_status; return osEvent_ret_status;
#endif
} }
res = rt_mbx_wait(queue_id, &ret.value.p, rt_ms2tick(millisec)); res = rt_mbx_wait(queue_id, &ret.value.p, rt_ms2tick(millisec));
if (res == OS_R_TMO) { if (res == OS_R_TMO) {
ret.status = millisec ? osEventTimeout : osOK; ret.status = millisec ? osEventTimeout : osOK;
#if defined (__GNUC__) && defined (__ARM_PCS_VFP)
osEvent_ret_value;
return;
#else
return osEvent_ret_value; return osEvent_ret_value;
#endif
} }
ret.status = osEventMessage; ret.status = osEventMessage;
#if defined (__GNUC__) && defined (__ARM_PCS_VFP)
osEvent_ret_value;
return;
#else
return osEvent_ret_value; return osEvent_ret_value;
#endif
} }
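The __ARM_PCS_VFP branch added above works around a calling-convention difference: under GCC's hard-float (AAPCS-VFP) variant the 128-bit vector return type is no longer delivered in r0-r3, which the assembly-level SVC glue expects, so the affected svc* functions are declared void and hand their result back through r0-r2 with inline assembly. A stand-alone sketch of that pattern, not the RTX source, with hypothetical names.
#include <stdint.h>
typedef struct { uint32_t status; uint32_t value; } demo_event_t;
/* Declared void: the two result words are left directly in r0/r1, where the
 * assembly caller on the SVC return path picks them up. */
void demo_return_event_in_regs(uint32_t status, uint32_t value) {
    demo_event_t ret = { status, value };
    __asm volatile ("MOV r1, %0\n\t"
                    "MOV r0, %1"
                    : /* no outputs */
                    : "r"(ret.value), "r"(ret.status)
                    : "r0", "r1");
}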

View File

@ -54,7 +54,7 @@
#if defined (__CC_ARM) /* ARM Compiler */ #if defined (__CC_ARM) /* ARM Compiler */
#if ((__TARGET_ARCH_7_M || __TARGET_ARCH_7E_M || __TARGET_ARCH_7_A) && !NO_EXCLUSIVE_ACCESS) #if ((__TARGET_ARCH_7_M || __TARGET_ARCH_7E_M || __TARGET_ARCH_7_A) && !defined(NO_EXCLUSIVE_ACCESS))
#define __USE_EXCLUSIVE_ACCESS #define __USE_EXCLUSIVE_ACCESS
#else #else
#undef __USE_EXCLUSIVE_ACCESS #undef __USE_EXCLUSIVE_ACCESS
@ -62,7 +62,16 @@
#elif defined (__GNUC__) /* GNU Compiler */ #elif defined (__GNUC__) /* GNU Compiler */
#error GNU Compiler support not implemented for Cortex-A #undef __USE_EXCLUSIVE_ACCESS
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
#define __TARGET_FPU_VFP 1
#else
#define __TARGET_FPU_VFP 0
#endif
#define __inline inline
#define __weak __attribute__((weak))
#elif defined (__ICCARM__) /* IAR Compiler */ #elif defined (__ICCARM__) /* IAR Compiler */
@ -94,7 +103,6 @@ extern const U32 GICInterface_BASE;
priority = GICI_ICCPMR; \ priority = GICI_ICCPMR; \
GICI_ICCPMR = 0xff; \ GICI_ICCPMR = 0xff; \
GICI_ICCPMR = GICI_ICCPMR - 1; \ GICI_ICCPMR = GICI_ICCPMR - 1; \
while(GICI_ICCPMR > priority);\
__DSB();\ __DSB();\
if(!irq_dis) __enable_irq(); \ if(!irq_dis) __enable_irq(); \

View File

@ -86,7 +86,7 @@ OFFICIAL_MBED_LIBRARY_BUILD = (
('LPC11U68', ('ARM', 'uARM','GCC_ARM','GCC_CR', 'IAR')), ('LPC11U68', ('ARM', 'uARM','GCC_ARM','GCC_CR', 'IAR')),
('OC_MBUINO', ('ARM', 'uARM', 'GCC_ARM', 'IAR')), ('OC_MBUINO', ('ARM', 'uARM', 'GCC_ARM', 'IAR')),
('RZ_A1H' , ('ARM',)), ('RZ_A1H' , ('ARM', 'GCC_ARM')),
) )

View File

@ -0,0 +1,77 @@
# This file was automagically generated by mbed.org. For more information,
# see http://mbed.org/handbook/Exporting-to-GCC-ARM-Embedded
GCC_BIN =
PROJECT = {{name}}
OBJECTS = {% for f in to_be_compiled %}{{f}} {% endfor %}
SYS_OBJECTS = {% for f in object_files %}{{f}} {% endfor %}
INCLUDE_PATHS = {% for p in include_paths %}-I{{p}} {% endfor %}
LIBRARY_PATHS = {% for p in library_paths %}-L{{p}} {% endfor %}
LIBRARIES = {% for lib in libraries %}-l{{lib}} {% endfor %}
LINKER_SCRIPT = {{linker_script}}
###############################################################################
AS = $(GCC_BIN)arm-none-eabi-as
CC = $(GCC_BIN)arm-none-eabi-gcc
CPP = $(GCC_BIN)arm-none-eabi-g++
LD = $(GCC_BIN)arm-none-eabi-gcc
OBJCOPY = $(GCC_BIN)arm-none-eabi-objcopy
OBJDUMP = $(GCC_BIN)arm-none-eabi-objdump
SIZE = $(GCC_BIN)arm-none-eabi-size
CPU = -mcpu=cortex-a9 -mthumb-interwork -march=armv7-a -mfpu=vfpv3-d16 -mfloat-abi=hard
CC_FLAGS = $(CPU) -c -g -fno-common -fmessage-length=0 -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers
CC_FLAGS += -fno-exceptions -fno-builtin -ffunction-sections -fdata-sections -fno-delete-null-pointer-checks -fomit-frame-pointer
CC_FLAGS += -MMD -MP -mno-unaligned-access
CC_SYMBOLS = {% for s in symbols %}-D{{s}} {% endfor %}
LD_FLAGS = $(CPU) -Wl,--gc-sections --specs=nano.specs -u _printf_float -u _scanf_float -Wl,--wrap,main
LD_FLAGS += -Wl,-Map=$(PROJECT).map,--cref
LD_SYS_LIBS = -lstdc++ -lsupc++ -lm -lc -lgcc -lnosys
ifeq ($(DEBUG), 1)
CC_FLAGS += -DDEBUG -O0
else
CC_FLAGS += -DNDEBUG -Os
endif
all: $(PROJECT).bin $(PROJECT).hex
clean:
rm -f $(PROJECT).bin $(PROJECT).elf $(PROJECT).hex $(PROJECT).map $(PROJECT).lst $(OBJECTS) $(DEPS)
.s.o:
$(AS) $(CPU) -o $@ $<
.c.o:
$(CC) $(CC_FLAGS) $(CC_SYMBOLS) -std=gnu99 $(INCLUDE_PATHS) -o $@ $<
.cpp.o:
$(CPP) $(CC_FLAGS) $(CC_SYMBOLS) -std=gnu++98 -fno-rtti $(INCLUDE_PATHS) -o $@ $<
$(PROJECT).elf: $(OBJECTS) $(SYS_OBJECTS)
$(LD) $(LD_FLAGS) -T$(LINKER_SCRIPT) $(LIBRARY_PATHS) -o $@ $^ $(LIBRARIES) $(LD_SYS_LIBS) $(LIBRARIES) $(LD_SYS_LIBS)
@echo ""
@echo "*****"
@echo "***** You must modify vector checksum value in *.bin and *.hex files."
@echo "*****"
@echo ""
$(SIZE) $@
$(PROJECT).bin: $(PROJECT).elf
@$(OBJCOPY) -O binary $< $@
$(PROJECT).hex: $(PROJECT).elf
@$(OBJCOPY) -O ihex $< $@
$(PROJECT).lst: $(PROJECT).elf
@$(OBJDUMP) -Sdh $< > $@
lst: $(PROJECT).lst
size:
$(SIZE) $(PROJECT).elf
DEPS = $(OBJECTS:.o=.d) $(SYS_OBJECTS:.o=.d)
-include $(DEPS)

View File

@ -75,7 +75,8 @@ class GccArm(Exporter):
'NRF51_DONGLE', 'NRF51_DONGLE',
'SEEED_TINY_BLE', 'SEEED_TINY_BLE',
'DISCO_F401VC', 'DISCO_F401VC',
'DELTA_DFCM_NNN40', 'DELTA_DFCM_NNN40',
'RZ_A1H',
] ]
DOT_IN_RELATIVE_PATH = True DOT_IN_RELATIVE_PATH = True

View File

@ -169,6 +169,7 @@ if __name__ == '__main__':
('gcc_arm', 'NUCLEO_F334R8'), ('gcc_arm', 'NUCLEO_F334R8'),
('gcc_arm', 'MTS_MDOT_F405RG'), ('gcc_arm', 'MTS_MDOT_F405RG'),
('gcc_arm', 'MTS_MDOT_F411RE'), ('gcc_arm', 'MTS_MDOT_F411RE'),
('gcc_arm', 'RZ_A1H'),
('ds5_5', 'LPC1768'), ('ds5_5', 'LPC11U24'), ('ds5_5', 'LPC1768'), ('ds5_5', 'LPC11U24'),

View File

@ -959,7 +959,7 @@ class RZ_A1H(Target):
Target.__init__(self) Target.__init__(self)
self.core = "Cortex-A9" self.core = "Cortex-A9"
self.extra_labels = ['RENESAS', 'MBRZA1H'] self.extra_labels = ['RENESAS', 'MBRZA1H']
self.supported_toolchains = ["ARM"] self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"] self.supported_form_factors = ["ARDUINO"]
self.default_toolchain = "ARM" self.default_toolchain = "ARM"

View File

@ -48,6 +48,15 @@ class GCC(mbedToolchain):
self.cpu.append("-mfpu=fpv4-sp-d16") self.cpu.append("-mfpu=fpv4-sp-d16")
self.cpu.append("-mfloat-abi=softfp") self.cpu.append("-mfloat-abi=softfp")
if target.core == "Cortex-A9":
self.cpu.append("-mthumb-interwork")
self.cpu.append("-marm")
self.cpu.append("-march=armv7-a")
self.cpu.append("-mfpu=vfpv3-d16")
self.cpu.append("-mfloat-abi=hard")
self.cpu.append("-mno-unaligned-access")
# Note: We are using "-O2" instead of "-Os" to avoid this known GCC bug: # Note: We are using "-O2" instead of "-Os" to avoid this known GCC bug:
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46762 # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46762
common_flags = ["-c", "-Wall", "-Wextra", common_flags = ["-c", "-Wall", "-Wextra",
@ -177,6 +186,8 @@ class GCC_ARM(GCC):
self.ld.append("--specs=nano.specs") self.ld.append("--specs=nano.specs")
if target.name in ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]: if target.name in ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]:
self.ld.extend(["-u _printf_float", "-u _scanf_float"]) self.ld.extend(["-u _printf_float", "-u _scanf_float"])
elif target.name in ["RZ_A1H"]:
self.ld.extend(["-u_printf_float", "-u_scanf_float"])
self.sys_libs.append("nosys") self.sys_libs.append("nosys")