mirror of https://github.com/ARMmbed/mbed-os.git
[NANO130] Support SPI
parent 70a618835a
commit 71cd9c3b55
@@ -69,9 +69,16 @@ typedef enum {
 } UARTName;
 
 typedef enum {
-    SPI_0 = (int) NU_MODNAME(SPI0_BASE, 0, 0),
-    SPI_1 = (int) NU_MODNAME(SPI1_BASE, 1, 0),
-    SPI_2 = (int) NU_MODNAME(SPI2_BASE, 2, 0)
+    SPI_0_0 = (int) NU_MODNAME(SPI0_BASE, 0, 0),
+    SPI_0_1 = (int) NU_MODNAME(SPI0_BASE, 0, 1),
+    SPI_1_0 = (int) NU_MODNAME(SPI1_BASE, 1, 0),
+    SPI_1_1 = (int) NU_MODNAME(SPI1_BASE, 1, 1),
+    SPI_2_0 = (int) NU_MODNAME(SPI2_BASE, 2, 0),
+    SPI_2_1 = (int) NU_MODNAME(SPI2_BASE, 2, 1),
+
+    SPI_0 = SPI_0_0,
+    SPI_1 = SPI_1_0,
+    SPI_2 = SPI_2_0
 } SPIName;
 
 typedef enum {
@@ -103,13 +110,17 @@ typedef enum {
 } RTCName;
 
 typedef enum {
-    DMA_0 = (int) NU_MODNAME(VDMA_BASE, 0, 0),
-    DMA_1 = (int) NU_MODNAME(PDMA1_BASE, 0, 0),
-    DMA_2 = (int) NU_MODNAME(PDMA2_BASE, 0, 0),
-    DMA_3 = (int) NU_MODNAME(PDMA3_BASE, 0, 0),
-    DMA_4 = (int) NU_MODNAME(PDMA4_BASE, 0, 0),
-    DMA_5 = (int) NU_MODNAME(PDMA5_BASE, 0, 0),
-    DMA_6 = (int) NU_MODNAME(PDMA6_BASE, 0, 0),
+    DMAGCR_0 = (int) NU_MODNAME(PDMAGCR_BASE, 0, 0)
+} DMAGCR_Name;
+
+typedef enum {
+    DMA_0_0 = (int) NU_MODNAME(VDMA_BASE, 0, 0),
+    DMA_1_0 = (int) NU_MODNAME(PDMA1_BASE, 1, 0),
+    DMA_2_0 = (int) NU_MODNAME(PDMA2_BASE, 2, 0),
+    DMA_3_0 = (int) NU_MODNAME(PDMA3_BASE, 3, 0),
+    DMA_4_0 = (int) NU_MODNAME(PDMA4_BASE, 4, 0),
+    DMA_5_0 = (int) NU_MODNAME(PDMA5_BASE, 5, 0),
+    DMA_6_0 = (int) NU_MODNAME(PDMA6_BASE, 6, 0),
 } DMAName;
 
 typedef enum {
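Annotation: the NU_MODNAME encoding above packs a peripheral's base address together with a module index and a subindex (for SPI, the subindex selects the MOSI0/MISO0 vs. MOSI1/MISO1 port of one engine), so one int identifies base, engine, and port at once. A minimal sketch of what such macros can look like, assuming the index and subindex live in low bits left free by peripheral address alignment; the real definitions are in nu_modutil.h and may use different bit positions:

    /* Illustrative only -- not the actual nu_modutil.h definitions. */
    #define NU_MODSUBINDEX_Pos      0
    #define NU_MODSUBINDEX_Msk      (0x1F << NU_MODSUBINDEX_Pos)
    #define NU_MODINDEX_Pos         5
    #define NU_MODINDEX_Msk         (0x1F << NU_MODINDEX_Pos)

    #define NU_MODNAME(BASE, INDEX, SUBINDEX) \
        ((BASE) | ((INDEX) << NU_MODINDEX_Pos) | ((SUBINDEX) << NU_MODSUBINDEX_Pos))
    #define NU_MODBASE(MODNAME)     ((MODNAME) & ~(NU_MODINDEX_Msk | NU_MODSUBINDEX_Msk))
    #define NU_MODINDEX(MODNAME)    (((MODNAME) & NU_MODINDEX_Msk) >> NU_MODINDEX_Pos)
    #define NU_MODSUBINDEX(MODNAME) (((MODNAME) & NU_MODSUBINDEX_Msk) >> NU_MODSUBINDEX_Pos)

Under an encoding like this, SPI_0_0 and SPI_0_1 share NU_MODBASE() (the same SPI0 engine) but differ in NU_MODSUBINDEX(), which is exactly what spi_init() in the new spi_api.c relies on when merging pins, while the SPI_0 = SPI_0_0 aliases keep existing code that refers to plain SPI_0 compiling.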
@@ -89,6 +89,7 @@ struct spi_s {
     uint32_t event;
     //void (*irq_handler_tx_async)(void);
     //void (*irq_handler_rx_async)(void);
+    uint32_t hdlr_async;
 };
 
 struct i2c_s {
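Annotation: the new hdlr_async member stores the mbed HAL's asynchronous-transfer handler as a raw address (the HAL passes it in as a uint32_t). Because NANO130 cannot relocate its vector table (see the note in spi_api.c below), the fixed startup ISRs dispatch to this stored handler in software. Distilled from spi_irq() and spi_enable_vector_interrupt() in the new file, the pattern is roughly:

    /* Sketch of the dispatch pattern used below; not a verbatim copy. */
    void SPI0_IRQHandler(void)                  /* weak symbol from startup_Nano100Series.c */
    {
        spi_t *obj = spi0_var.obj;              /* registered by spi_enable_vector_interrupt() */
        if (obj && obj->spi.hdlr_async) {
            ((void (*)(void)) obj->spi.hdlr_async)();   /* jump to the stored async handler */
        }
    }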
@@ -0,0 +1,861 @@
/* mbed Microcontroller Library
 * Copyright (c) 2015-2017 Nuvoton
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "spi_api.h"

#if DEVICE_SPI

#include "cmsis.h"
#include "pinmap.h"
#include "PeripheralPins.h"
#include "nu_modutil.h"
#include "nu_miscutil.h"
#include "nu_bitutil.h"

#if DEVICE_SPI_ASYNCH
#include "dma_api.h"
#include "dma.h"
#endif

#define NU_SPI_FRAME_MIN    8
#define NU_SPI_FRAME_MAX    32
#define NU_SPI_FIFO_DEPTH   8

struct nu_spi_var {
    spi_t *obj;
    void (*vec)(void);
#if DEVICE_SPI_ASYNCH
    uint8_t pdma_perp_tx;
    uint8_t pdma_perp_rx;
#endif
};

// NOTE: NANO130 doesn't support relocating the vector table. The ISR vector passed into NVIC_SetVector() can only be a weak symbol defined in startup_Nano100Series.c.
void SPI0_IRQHandler(void);
void SPI1_IRQHandler(void);
void SPI2_IRQHandler(void);
static void spi_irq(spi_t *obj);

static struct nu_spi_var spi0_var = {
    .obj = NULL,
    .vec = SPI0_IRQHandler,
#if DEVICE_SPI_ASYNCH
    .pdma_perp_tx = PDMA_SPI0_TX,
    .pdma_perp_rx = PDMA_SPI0_RX
#endif
};
static struct nu_spi_var spi1_var = {
    .obj = NULL,
    .vec = SPI1_IRQHandler,
#if DEVICE_SPI_ASYNCH
    .pdma_perp_tx = PDMA_SPI1_TX,
    .pdma_perp_rx = PDMA_SPI1_RX
#endif
};
static struct nu_spi_var spi2_var = {
    .obj = NULL,
    .vec = SPI2_IRQHandler,
#if DEVICE_SPI_ASYNCH
    .pdma_perp_tx = PDMA_SPI2_TX,
    .pdma_perp_rx = PDMA_SPI2_RX
#endif
};

#if DEVICE_SPI_ASYNCH
static void spi_enable_vector_interrupt(spi_t *obj, uint32_t handler, uint8_t enable);
static void spi_master_enable_interrupt(spi_t *obj, uint8_t enable, uint32_t mask);
static uint32_t spi_master_write_asynch(spi_t *obj, uint32_t tx_limit);
static uint32_t spi_master_read_asynch(spi_t *obj);
static uint32_t spi_event_check(spi_t *obj);
static void spi_enable_event(spi_t *obj, uint32_t event, uint8_t enable);
static void spi_buffer_set(spi_t *obj, const void *tx, size_t tx_length, void *rx, size_t rx_length);
static void spi_check_dma_usage(DMAUsage *dma_usage, int *dma_ch_tx, int *dma_ch_rx);
static uint8_t spi_get_data_width(spi_t *obj);
static int spi_is_tx_complete(spi_t *obj);
static int spi_is_rx_complete(spi_t *obj);
static int spi_writeable(spi_t *obj);
static int spi_readable(spi_t *obj);
static void spi_dma_handler_tx(uint32_t id, uint32_t event_dma);
static void spi_dma_handler_rx(uint32_t id, uint32_t event_dma);
#endif

static uint32_t spi_modinit_mask = 0;

static const struct nu_modinit_s spi_modinit_tab[] = {
    {SPI_0, SPI0_MODULE, CLK_CLKSEL2_SPI0_S_HCLK, MODULE_NoMsk, SPI0_RST, SPI0_IRQn, &spi0_var},
    {SPI_1, SPI1_MODULE, CLK_CLKSEL2_SPI1_S_HCLK, MODULE_NoMsk, SPI1_RST, SPI1_IRQn, &spi1_var},
    {SPI_2, SPI2_MODULE, CLK_CLKSEL2_SPI2_S_HCLK, MODULE_NoMsk, SPI2_RST, SPI2_IRQn, &spi2_var},

    {NC, 0, 0, 0, 0, (IRQn_Type) 0, NULL}
};
void spi_init(spi_t *obj, PinName mosi, PinName miso, PinName sclk, PinName ssel) {
    // Determine which SPI_x the pins are used for
    uint32_t spi_mosi = pinmap_peripheral(mosi, PinMap_SPI_MOSI);
    uint32_t spi_miso = pinmap_peripheral(miso, PinMap_SPI_MISO);
    uint32_t spi_sclk = pinmap_peripheral(sclk, PinMap_SPI_SCLK);
    uint32_t spi_ssel = pinmap_peripheral(ssel, PinMap_SPI_SSEL);
    uint32_t spi_data = pinmap_merge(spi_mosi, spi_miso);
    uint32_t spi_cntl = pinmap_merge(spi_sclk, spi_ssel);
    // NOTE:
    // NANO130: Supports two-port SPI MOSI/MISO 0/1
#if 0
    obj->spi.spi = (SPIName) pinmap_merge(spi_data, spi_cntl);
#else
    if (NU_MODBASE(spi_data) == NU_MODBASE(spi_cntl)) {
        // NOTE: spi_data has the subindex (port) encoded, but spi_cntl doesn't.
        obj->spi.spi = (SPIName) spi_data;
    }
    else {
        obj->spi.spi = (SPIName) NC;
    }
#endif
    MBED_ASSERT((int)obj->spi.spi != NC);

    const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
    MBED_ASSERT(modinit != NULL);
    MBED_ASSERT(modinit->modname == obj->spi.spi);

    // Reset this module
    SYS_ResetModule(modinit->rsetidx);

    // Select IP clock source
    CLK_SetModuleClock(modinit->clkidx, modinit->clksrc, modinit->clkdiv);
    // Enable IP clock
    CLK_EnableModuleClock(modinit->clkidx);

    pinmap_pinout(mosi, PinMap_SPI_MOSI);
    pinmap_pinout(miso, PinMap_SPI_MISO);
    pinmap_pinout(sclk, PinMap_SPI_SCLK);
    pinmap_pinout(ssel, PinMap_SPI_SSEL);

    obj->spi.pin_mosi = mosi;
    obj->spi.pin_miso = miso;
    obj->spi.pin_sclk = sclk;
    obj->spi.pin_ssel = ssel;

    //SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
    // Configure the SPI data format and frequency
    //spi_format(obj, 8, 0, SPI_MSB); // 8 bits, mode 0
    //spi_frequency(obj, 1000000);

#if DEVICE_SPI_ASYNCH
    obj->spi.dma_usage = DMA_USAGE_NEVER;
    obj->spi.event = 0;
    obj->spi.dma_chn_id_tx = DMA_ERROR_OUT_OF_CHANNELS;
    obj->spi.dma_chn_id_rx = DMA_ERROR_OUT_OF_CHANNELS;
#endif

    // Mark this module to be inited.
    int i = modinit - spi_modinit_tab;
    spi_modinit_mask |= 1 << i;
}
void spi_free(spi_t *obj)
{
#if DEVICE_SPI_ASYNCH
    if (obj->spi.dma_chn_id_tx != DMA_ERROR_OUT_OF_CHANNELS) {
        dma_channel_free(obj->spi.dma_chn_id_tx);
        obj->spi.dma_chn_id_tx = DMA_ERROR_OUT_OF_CHANNELS;
    }
    if (obj->spi.dma_chn_id_rx != DMA_ERROR_OUT_OF_CHANNELS) {
        dma_channel_free(obj->spi.dma_chn_id_rx);
        obj->spi.dma_chn_id_rx = DMA_ERROR_OUT_OF_CHANNELS;
    }
#endif

    SPI_Close((SPI_T *) NU_MODBASE(obj->spi.spi));

    const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
    MBED_ASSERT(modinit != NULL);
    MBED_ASSERT(modinit->modname == obj->spi.spi);
    SPI_DisableInt(((SPI_T *) NU_MODBASE(obj->spi.spi)), (SPI_FIFO_RXOVR_INTEN_MASK | SPI_FIFO_RX_INTEN_MASK | SPI_FIFO_TX_INTEN_MASK));
    NVIC_DisableIRQ(modinit->irq_n);

    // Disable IP clock
    CLK_DisableModuleClock(modinit->clkidx);

    //((struct nu_spi_var *) modinit->var)->obj = NULL;

    // Mark this module to be deinited.
    int i = modinit - spi_modinit_tab;
    spi_modinit_mask &= ~(1 << i);
}
void spi_format(spi_t *obj, int bits, int mode, int slave)
{
    MBED_ASSERT(bits >= NU_SPI_FRAME_MIN && bits <= NU_SPI_FRAME_MAX);

    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);

    // NOTE 1: All configurations should be ready before enabling the SPI peripheral.
    // NOTE 2: Re-configuration is allowed only while the SPI peripheral is idle.
    // NOTE 3: On NANO130, SPI_CTL.GO_BUSY always reads as 1 in slave mode, so it cannot be used to check busy status.
    if (! (spi_base->CTL & SPI_CTL_SLAVE_Msk)) {
        while (SPI_IS_BUSY(spi_base));
    }

    SPI_Open(spi_base,
        slave ? SPI_SLAVE : SPI_MASTER,
        (mode == 0) ? SPI_MODE_0 : (mode == 1) ? SPI_MODE_1 : (mode == 2) ? SPI_MODE_2 : SPI_MODE_3,
        bits,
        SPI_GetBusClock(spi_base));
    // NOTE: Hardcoded to be MSB first.
    SPI_SET_MSB_FIRST(spi_base);
    // NOTE: On NANO130, FIFO mode defaults to disabled in SPI_Open(), so place SPI_EnableFIFO() after SPI_Open().
    SPI_EnableFIFO(spi_base, NU_SPI_FIFO_DEPTH / 2, NU_SPI_FIFO_DEPTH / 2);

    if (! slave) {
        // Master
        if (obj->spi.pin_ssel != NC) {
            // Configure SS as low active.
            switch (NU_MODSUBINDEX(obj->spi.spi)) {
            case 0:
                SPI_EnableAutoSS(spi_base, SPI_SS0, SPI_SS0_ACTIVE_LOW);
                break;

            case 1:
                SPI_EnableAutoSS(spi_base, SPI_SS1, SPI_SS1_ACTIVE_LOW);
                break;
            }
        }
        else {
            SPI_DisableAutoSS(spi_base);
        }
    }
    else {
        // Slave
        // Configure SS as low active.
        switch (NU_MODSUBINDEX(obj->spi.spi)) {
        case 0:
            spi_base->SSR &= ~SPI_SS0_ACTIVE_HIGH;
            break;
        case 1:
            spi_base->SSR &= ~SPI_SS1_ACTIVE_HIGH;
            break;
        }
    }
}

void spi_frequency(spi_t *obj, int hz)
{
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);

    // NOTE: On NANO130, SPI_CTL.GO_BUSY always reads as 1 in slave mode, so it cannot be used to check busy status.
    if (! (spi_base->CTL & SPI_CTL_SLAVE_Msk)) {
        while (SPI_IS_BUSY(spi_base));
    }

    SPI_SetBusClock((SPI_T *) NU_MODBASE(obj->spi.spi), hz);
}


int spi_master_write(spi_t *obj, int value)
{
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);

    // NOTE: Data in the receive FIFO can be read out via ICE.
    // NOTE: On NANO130, not required as FIFO mode is enabled.
    //SPI_TRIGGER(spi_base);

    // Wait for tx buffer empty
    while (! spi_writeable(obj));
    uint32_t TX = (NU_MODSUBINDEX(obj->spi.spi) == 0) ? ((uint32_t) &spi_base->TX0) : ((uint32_t) &spi_base->TX1);
    M32(TX) = value;

    // Wait for rx buffer full
    while (! spi_readable(obj));
    uint32_t RX = (NU_MODSUBINDEX(obj->spi.spi) == 0) ? ((uint32_t) &spi_base->RX0) : ((uint32_t) &spi_base->RX1);
    int value2 = M32(RX);

    return value2;
}

#if DEVICE_SPISLAVE
int spi_slave_receive(spi_t *obj)
{
    // NOTE: On NANO130, not required as FIFO mode is enabled.
    //SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
    //SPI_TRIGGER(spi_base);

    return spi_readable(obj);
}

int spi_slave_read(spi_t *obj)
{
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);

    // NOTE: On NANO130, not required as FIFO mode is enabled.
    //SPI_TRIGGER(spi_base);

    // Wait for rx buffer full
    while (! spi_readable(obj));
    uint32_t RX = (NU_MODSUBINDEX(obj->spi.spi) == 0) ? ((uint32_t) &spi_base->RX0) : ((uint32_t) &spi_base->RX1);
    int value = M32(RX);
    return value;
}

void spi_slave_write(spi_t *obj, int value)
{
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);

    // NOTE: On NANO130, not required as FIFO mode is enabled.
    //SPI_TRIGGER(spi_base);

    // Wait for tx buffer empty
    while (! spi_writeable(obj));
    uint32_t TX = (NU_MODSUBINDEX(obj->spi.spi) == 0) ? ((uint32_t) &spi_base->TX0) : ((uint32_t) &spi_base->TX1);
    M32(TX) = value;
}
#endif
#if DEVICE_SPI_ASYNCH
void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx, size_t rx_length, uint8_t bit_width, uint32_t handler, uint32_t event, DMAUsage hint)
{
    //MBED_ASSERT(bits >= NU_SPI_FRAME_MIN && bits <= NU_SPI_FRAME_MAX);
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
    SPI_SET_DATA_WIDTH(spi_base, bit_width);

    obj->spi.dma_usage = hint;
    spi_check_dma_usage(&obj->spi.dma_usage, &obj->spi.dma_chn_id_tx, &obj->spi.dma_chn_id_rx);
    uint32_t data_width = spi_get_data_width(obj);
    // Conditions to go the DMA way:
    // (1) No DMA support for data widths that are not multiples of 8.
    // (2) tx length >= rx length. Otherwise, once tx DMA is done, there is no bus activity for the remaining rx.
    if ((data_width % 8) ||
        (tx_length < rx_length)) {
        obj->spi.dma_usage = DMA_USAGE_NEVER;
        dma_channel_free(obj->spi.dma_chn_id_tx);
        obj->spi.dma_chn_id_tx = DMA_ERROR_OUT_OF_CHANNELS;
        dma_channel_free(obj->spi.dma_chn_id_rx);
        obj->spi.dma_chn_id_rx = DMA_ERROR_OUT_OF_CHANNELS;
    }

    // SPI IRQ is necessary for both the interrupt way and the DMA way
    spi_enable_event(obj, event, 1);
    spi_buffer_set(obj, tx, tx_length, rx, rx_length);

    // NOTE: On NANO130, not required as FIFO mode is enabled.
    //SPI_TRIGGER(spi_base);

    if (obj->spi.dma_usage == DMA_USAGE_NEVER) {
        // Interrupt way
        spi_master_write_asynch(obj, NU_SPI_FIFO_DEPTH / 2);
        spi_enable_vector_interrupt(obj, handler, 1);
        spi_master_enable_interrupt(obj, 1, SPI_FIFO_RX_INTEN_MASK | SPI_FIFO_TX_INTEN_MASK);
    } else {
        // DMA way
        const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
        MBED_ASSERT(modinit != NULL);
        MBED_ASSERT(modinit->modname == obj->spi.spi);

        // Configure tx DMA
        dma_enable(obj->spi.dma_chn_id_tx, 1);  // Enable this DMA channel
        PDMA_SetTransferMode(obj->spi.dma_chn_id_tx,
            ((struct nu_spi_var *) modinit->var)->pdma_perp_tx, // Peripheral connected to this PDMA
            0,  // Scatter-gather disabled
            0); // Scatter-gather descriptor address
        PDMA_SetTransferCnt(obj->spi.dma_chn_id_tx,
            (data_width == 8) ? PDMA_WIDTH_8 : (data_width == 16) ? PDMA_WIDTH_16 : PDMA_WIDTH_32,
            tx_length);
        PDMA_SetTransferAddr(obj->spi.dma_chn_id_tx,
            (uint32_t) tx,  // NOTE:
                            // NUC472: End of source address
                            // M451: Start of source address
                            // NANO130: Start of source address
            PDMA_SAR_INC,   // Source address incremental
            NU_MODSUBINDEX(obj->spi.spi) == 0 ? (uint32_t) &spi_base->TX0 : (uint32_t) &spi_base->TX1,  // Destination address
            PDMA_DAR_FIX);  // Destination address fixed
#if 0   // NOTE:
        // NANO130: No burst type setting
        PDMA_SetBurstType(obj->spi.dma_chn_id_tx,
            PDMA_REQ_SINGLE,    // Single mode
            0); // Burst size
#endif
        PDMA_EnableInt(obj->spi.dma_chn_id_tx,
            PDMA_IER_TD_IE_Msk);    // Interrupt type
        // Register DMA event handler
        dma_set_handler(obj->spi.dma_chn_id_tx, (uint32_t) spi_dma_handler_tx, (uint32_t) obj, DMA_EVENT_ALL);

        // Configure rx DMA
        dma_enable(obj->spi.dma_chn_id_rx, 1);  // Enable this DMA channel
        PDMA_SetTransferMode(obj->spi.dma_chn_id_rx,
            ((struct nu_spi_var *) modinit->var)->pdma_perp_rx, // Peripheral connected to this PDMA
            0,  // Scatter-gather disabled
            0); // Scatter-gather descriptor address
        PDMA_SetTransferCnt(obj->spi.dma_chn_id_rx,
            (data_width == 8) ? PDMA_WIDTH_8 : (data_width == 16) ? PDMA_WIDTH_16 : PDMA_WIDTH_32,
            rx_length);
        PDMA_SetTransferAddr(obj->spi.dma_chn_id_rx,
            NU_MODSUBINDEX(obj->spi.spi) == 0 ? (uint32_t) &spi_base->RX0 : (uint32_t) &spi_base->RX1,  // Source address
            PDMA_SAR_FIX,   // Source address fixed
            (uint32_t) rx,  // NOTE:
                            // NUC472: End of destination address
                            // M451: Start of destination address
                            // NANO130: Start of destination address
            PDMA_DAR_INC);  // Destination address incremental
#if 0   // NOTE:
        // NANO130: No burst type setting
        PDMA_SetBurstType(obj->spi.dma_chn_id_rx,
            PDMA_REQ_SINGLE,    // Single mode
            0); // Burst size
#endif
        PDMA_EnableInt(obj->spi.dma_chn_id_rx,
            PDMA_IER_TD_IE_Msk);    // Interrupt type
        // Register DMA event handler
        dma_set_handler(obj->spi.dma_chn_id_rx, (uint32_t) spi_dma_handler_rx, (uint32_t) obj, DMA_EVENT_ALL);

        // Start tx/rx DMA transfer
        spi_enable_vector_interrupt(obj, handler, 1);
        // No TX/RX FIFO threshold interrupt
        spi_master_enable_interrupt(obj, 0, SPI_FIFO_RX_INTEN_MASK | SPI_FIFO_TX_INTEN_MASK);
        // NOTE: It is safer to start rx DMA first and then tx DMA. Otherwise, the receive FIFO is subject to overflow by tx DMA.
        SPI_TRIGGER_RX_PDMA(((SPI_T *) NU_MODBASE(obj->spi.spi)));
        SPI_TRIGGER_TX_PDMA(((SPI_T *) NU_MODBASE(obj->spi.spi)));
        PDMA_Trigger(obj->spi.dma_chn_id_rx);
        PDMA_Trigger(obj->spi.dma_chn_id_tx);
    }
}
/**
 * Abort an SPI transfer
 * This is a helper function for event handling. When any of the events listed occurs, the HAL will abort any ongoing
 * transfers.
 * @param[in] obj The SPI peripheral to stop
 */
void spi_abort_asynch(spi_t *obj)
{
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);

    if (obj->spi.dma_usage != DMA_USAGE_NEVER) {
        // Receive FIFO Overrun in case of tx length > rx length on the DMA way
        if (spi_base->STATUS & SPI_STATUS_RX_OVER_RUN_Msk) {
            spi_base->STATUS = SPI_STATUS_RX_OVER_RUN_Msk;
        }

        if (obj->spi.dma_chn_id_tx != DMA_ERROR_OUT_OF_CHANNELS) {
            PDMA_DisableInt(obj->spi.dma_chn_id_tx, PDMA_IER_TD_IE_Msk);
            // FIXME: On NUC472, the next PDMA transfer will fail with PDMA_STOP() called. Cause is unknown.
            //PDMA_STOP(obj->spi.dma_chn_id_tx);
            dma_enable(obj->spi.dma_chn_id_tx, 0);
        }
        //SPI_DISABLE_TX_PDMA(((SPI_T *) NU_MODBASE(obj->spi.spi)));
        spi_base->DMA &= ~SPI_DMA_TX_DMA_EN_Msk;

        if (obj->spi.dma_chn_id_rx != DMA_ERROR_OUT_OF_CHANNELS) {
            PDMA_DisableInt(obj->spi.dma_chn_id_rx, PDMA_IER_TD_IE_Msk);
            // FIXME: On NUC472, the next PDMA transfer will fail with PDMA_STOP() called. Cause is unknown.
            //PDMA_STOP(obj->spi.dma_chn_id_rx);
            dma_enable(obj->spi.dma_chn_id_rx, 0);
        }
        //SPI_DISABLE_RX_PDMA(((SPI_T *) NU_MODBASE(obj->spi.spi)));
        spi_base->DMA &= ~SPI_DMA_RX_DMA_EN_Msk;
    }

    // Necessary for both the interrupt way and the DMA way
    spi_enable_vector_interrupt(obj, 0, 0);
    spi_master_enable_interrupt(obj, 0, SPI_FIFO_RX_INTEN_MASK | SPI_FIFO_TX_INTEN_MASK);

    // FIXME: SPI H/W may get out of state without the busy check.
    while (SPI_IS_BUSY(spi_base));

    SPI_ClearRxFIFO(spi_base);
    SPI_ClearTxFIFO(spi_base);
}

/**
 * Handle the SPI interrupt
 * Read frames until the RX FIFO is empty. Write at most as many frames as were read. This way,
 * it is unlikely that the RX FIFO will overflow.
 * @param[in] obj The SPI peripheral that generated the interrupt
 * @return Event flags to report to the registered callback, or 0 if the transfer is still in progress
 */
uint32_t spi_irq_handler_asynch(spi_t *obj)
{
    // Check for SPI events
    uint32_t event = spi_event_check(obj);
    if (event) {
        spi_abort_asynch(obj);
    }

    return (obj->spi.event & event) | ((event & SPI_EVENT_COMPLETE) ? SPI_EVENT_INTERNAL_TRANSFER_COMPLETE : 0);
}
uint8_t spi_active(spi_t *obj)
{
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
    // FIXME
    /*
    if ((obj->rx_buff.buffer && obj->rx_buff.pos < obj->rx_buff.length)
            || (obj->tx_buff.buffer && obj->tx_buff.pos < obj->tx_buff.length) ){
        return 1;
    } else {
        // interrupts are disabled, all transactions have been completed
        // TODO: checking rx fifo, it reports data even though RFDF is not set
        return DSPI_HAL_GetIntMode(obj->spi.address, kDspiRxFifoDrainRequest);
    }*/

    return SPI_IS_BUSY(spi_base);
}

int spi_allow_powerdown(void)
{
    uint32_t modinit_mask = spi_modinit_mask;
    while (modinit_mask) {
        int spi_idx = nu_ctz(modinit_mask);
        const struct nu_modinit_s *modinit = spi_modinit_tab + spi_idx;
        if (modinit->modname != NC) {
            SPI_T *spi_base = (SPI_T *) NU_MODBASE(modinit->modname);
            if (SPI_IS_BUSY(spi_base)) {
                return 0;
            }
        }
        modinit_mask &= ~(1 << spi_idx);
    }

    return 1;
}

void SPI0_IRQHandler(void)
{
    spi_irq(spi0_var.obj);
}
void SPI1_IRQHandler(void)
{
    spi_irq(spi1_var.obj);
}
void SPI2_IRQHandler(void)
{
    spi_irq(spi2_var.obj);
}
static void spi_irq(spi_t *obj)
{
    if (obj && obj->spi.hdlr_async) {
        void (*hdlr_async)(void) = (void (*)(void))(obj->spi.hdlr_async);
        hdlr_async();
    }
}
static int spi_writeable(spi_t *obj)
{
    // Receive FIFO must not be full to avoid receive FIFO overflow on next transmit/receive
    //return (! SPI_GET_TX_FIFO_FULL_FLAG(((SPI_T *) NU_MODBASE(obj->spi.spi)))) && (SPI_GET_RX_FIFO_COUNT(((SPI_T *) NU_MODBASE(obj->spi.spi))) < NU_SPI_FIFO_DEPTH);
    return (! SPI_GET_TX_FIFO_FULL_FLAG(((SPI_T *) NU_MODBASE(obj->spi.spi))));
}

static int spi_readable(spi_t *obj)
{
    return ! SPI_GET_RX_FIFO_EMPTY_FLAG(((SPI_T *) NU_MODBASE(obj->spi.spi)));
}

static void spi_enable_event(spi_t *obj, uint32_t event, uint8_t enable)
{
    obj->spi.event &= ~SPI_EVENT_ALL;
    obj->spi.event |= (event & SPI_EVENT_ALL);
    if (event & SPI_EVENT_RX_OVERFLOW) {
        SPI_EnableInt((SPI_T *) NU_MODBASE(obj->spi.spi), SPI_FIFO_RXOVR_INTEN_MASK);
    }
}

static void spi_enable_vector_interrupt(spi_t *obj, uint32_t handler, uint8_t enable)
{
    const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
    MBED_ASSERT(modinit != NULL);
    MBED_ASSERT(modinit->modname == obj->spi.spi);

    struct nu_spi_var *var = (struct nu_spi_var *) modinit->var;

    if (enable) {
        var->obj = obj;
        obj->spi.hdlr_async = handler;
        NVIC_SetVector(modinit->irq_n, (uint32_t) var->vec);
        NVIC_EnableIRQ(modinit->irq_n);
    }
    else {
        NVIC_DisableIRQ(modinit->irq_n);
        var->obj = NULL;
        obj->spi.hdlr_async = handler;
    }
}

static void spi_master_enable_interrupt(spi_t *obj, uint8_t enable, uint32_t mask)
{
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);

    // NOTE: On NANO130, SPI_IE_MASK/SPI_STATUS_INTSTS_Msk are for unit transfer IE/EF. Don't get confused.
    if (enable) {
        //SPI_SET_SUSPEND_CYCLE(spi_base, 4);
        // Enable tx/rx FIFO threshold interrupt
        SPI_EnableInt(spi_base, mask);
    }
    else {
        SPI_DisableInt(spi_base, mask);
    }
}
static uint32_t spi_event_check(spi_t *obj)
{
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
    uint32_t event = 0;

    if (obj->spi.dma_usage == DMA_USAGE_NEVER) {
        uint32_t n_rec = spi_master_read_asynch(obj);
        spi_master_write_asynch(obj, n_rec);
    }

    if (spi_is_tx_complete(obj) && spi_is_rx_complete(obj)) {
        event |= SPI_EVENT_COMPLETE;
    }

    // Receive FIFO Overrun
    if (spi_base->STATUS & SPI_STATUS_RX_OVER_RUN_Msk) {
        spi_base->STATUS = SPI_STATUS_RX_OVER_RUN_Msk;
        // In case of tx length > rx length on DMA way
        if (obj->spi.dma_usage == DMA_USAGE_NEVER) {
            event |= SPI_EVENT_RX_OVERFLOW;
        }
    }

    // Receive Time-Out
    if (spi_base->STATUS & SPI_STATUS_TIME_OUT_STS_Msk) {
        spi_base->STATUS = SPI_STATUS_TIME_OUT_STS_Msk;
        //event |= SPI_EVENT_ERROR;
    }
#if 0   // NOTE:
    // NANO130: No FIFO Under-Run IF
    // Transmit FIFO Under-Run
    if (spi_base->STATUS & SPI_STATUS_TXUFIF_Msk) {
        spi_base->STATUS = SPI_STATUS_TXUFIF_Msk;
        event |= SPI_EVENT_ERROR;
    }
#endif

    return event;
}
/**
 * Send words from the SPI TX buffer until the send limit is reached or the TX FIFO is full
 * tx_limit is provided to ensure that the number of SPI frames (words) in flight can be managed.
 * @param[in] obj The SPI object on which to operate
 * @param[in] tx_limit The maximum number of words to send
 * @return The number of SPI words that have been transferred
 */
static uint32_t spi_master_write_asynch(spi_t *obj, uint32_t tx_limit)
{
    uint32_t n_words = 0;
    uint32_t tx_rmn = obj->tx_buff.length - obj->tx_buff.pos;
    uint32_t rx_rmn = obj->rx_buff.length - obj->rx_buff.pos;
    uint32_t max_tx = NU_MAX(tx_rmn, rx_rmn);
    max_tx = NU_MIN(max_tx, tx_limit);
    uint8_t data_width = spi_get_data_width(obj);
    uint8_t bytes_per_word = (data_width + 7) / 8;
    uint8_t *tx = (uint8_t *)(obj->tx_buff.buffer) + bytes_per_word * obj->tx_buff.pos;
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
    uint32_t TX = (NU_MODSUBINDEX(obj->spi.spi) == 0) ? ((uint32_t) &spi_base->TX0) : ((uint32_t) &spi_base->TX1);

    while ((n_words < max_tx) && spi_writeable(obj)) {
        if (spi_is_tx_complete(obj)) {
            // Transmit dummy as the transmit buffer is empty
            M32(TX) = 0;
        }
        else {
            switch (bytes_per_word) {
            case 4:
                M32(TX) = nu_get32_le(tx);
                tx += 4;
                break;
            case 2:
                M32(TX) = nu_get16_le(tx);
                tx += 2;
                break;
            case 1:
                M32(TX) = *((uint8_t *) tx);
                tx += 1;
                break;
            }

            obj->tx_buff.pos ++;
        }
        n_words ++;
    }

    // Return the number of words that have been sent
    return n_words;
}
/**
 * Read SPI words out of the RX FIFO
 * Continues reading words out of the RX FIFO until the following condition is met:
 * o There are no more words in the FIFO
 * OR BOTH OF:
 * o At least as many words as the TX buffer have been received
 * o At least as many words as the RX buffer have been received
 * This way, RX overflows are not generated when the TX buffer size exceeds the RX buffer size
 * @param[in] obj The SPI object on which to operate
 * @return Returns the number of words extracted from the RX FIFO
 */
static uint32_t spi_master_read_asynch(spi_t *obj)
{
    uint32_t n_words = 0;
    uint32_t tx_rmn = obj->tx_buff.length - obj->tx_buff.pos;
    uint32_t rx_rmn = obj->rx_buff.length - obj->rx_buff.pos;
    uint32_t max_rx = NU_MAX(tx_rmn, rx_rmn);
    uint8_t data_width = spi_get_data_width(obj);
    uint8_t bytes_per_word = (data_width + 7) / 8;
    uint8_t *rx = (uint8_t *)(obj->rx_buff.buffer) + bytes_per_word * obj->rx_buff.pos;
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
    uint32_t RX = (NU_MODSUBINDEX(obj->spi.spi) == 0) ? ((uint32_t) &spi_base->RX0) : ((uint32_t) &spi_base->RX1);

    while ((n_words < max_rx) && spi_readable(obj)) {
        if (spi_is_rx_complete(obj)) {
            // Disregard as receive buffer is full
            M32(RX);
        }
        else {
            switch (bytes_per_word) {
            case 4: {
                uint32_t val = M32(RX);
                nu_set32_le(rx, val);
                rx += 4;
                break;
            }
            case 2: {
                uint16_t val = M32(RX);
                nu_set16_le(rx, val);
                rx += 2;
                break;
            }
            case 1:
                *rx ++ = M32(RX);
                break;
            }

            obj->rx_buff.pos ++;
        }
        n_words ++;
    }

    // Return the number of words received
    return n_words;
}
static void spi_buffer_set(spi_t *obj, const void *tx, size_t tx_length, void *rx, size_t rx_length)
{
    obj->tx_buff.buffer = (void *) tx;
    obj->tx_buff.length = tx_length;
    obj->tx_buff.pos = 0;
    obj->tx_buff.width = spi_get_data_width(obj);
    obj->rx_buff.buffer = rx;
    obj->rx_buff.length = rx_length;
    obj->rx_buff.pos = 0;
    obj->rx_buff.width = spi_get_data_width(obj);
}

static void spi_check_dma_usage(DMAUsage *dma_usage, int *dma_ch_tx, int *dma_ch_rx)
{
    if (*dma_usage != DMA_USAGE_NEVER) {
        if (*dma_ch_tx == DMA_ERROR_OUT_OF_CHANNELS) {
            *dma_ch_tx = dma_channel_allocate(DMA_CAP_NONE);
        }
        if (*dma_ch_rx == DMA_ERROR_OUT_OF_CHANNELS) {
            *dma_ch_rx = dma_channel_allocate(DMA_CAP_NONE);
        }

        if (*dma_ch_tx == DMA_ERROR_OUT_OF_CHANNELS || *dma_ch_rx == DMA_ERROR_OUT_OF_CHANNELS) {
            *dma_usage = DMA_USAGE_NEVER;
        }
    }

    if (*dma_usage == DMA_USAGE_NEVER) {
        dma_channel_free(*dma_ch_tx);
        *dma_ch_tx = DMA_ERROR_OUT_OF_CHANNELS;
        dma_channel_free(*dma_ch_rx);
        *dma_ch_rx = DMA_ERROR_OUT_OF_CHANNELS;
    }
}

static uint8_t spi_get_data_width(spi_t *obj)
{
    SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);

    uint32_t data_width = ((spi_base->CTL & SPI_CTL_TX_BIT_LEN_Msk) >> SPI_CTL_TX_BIT_LEN_Pos);
    if (data_width == 0) {
        data_width = 32;
    }

    return data_width;
}

static int spi_is_tx_complete(spi_t *obj)
{
    // ???: Exclude tx fifo empty check due to no such interrupt on DMA way
    return (obj->tx_buff.pos == obj->tx_buff.length);
    //return (obj->tx_buff.pos == obj->tx_buff.length && SPI_GET_TX_FIFO_EMPTY_FLAG(((SPI_T *) NU_MODBASE(obj->spi.spi))));
}

static int spi_is_rx_complete(spi_t *obj)
{
    return (obj->rx_buff.pos == obj->rx_buff.length);
}
static void spi_dma_handler_tx(uint32_t id, uint32_t event_dma)
{
    spi_t *obj = (spi_t *) id;

    // FIXME: Pass this error to the caller
    if (event_dma & DMA_EVENT_ABORT) {
    }
    // Expect the SPI IRQ to catch this transfer-done event
    if (event_dma & DMA_EVENT_TRANSFER_DONE) {
        obj->tx_buff.pos = obj->tx_buff.length;
    }
    // FIXME: Pass this error to the caller
    if (event_dma & DMA_EVENT_TIMEOUT) {
    }

    const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
    MBED_ASSERT(modinit != NULL);
    MBED_ASSERT(modinit->modname == obj->spi.spi);

    void (*vec)(void) = (void (*)(void)) NVIC_GetVector(modinit->irq_n);
    vec();
}

static void spi_dma_handler_rx(uint32_t id, uint32_t event_dma)
{
    spi_t *obj = (spi_t *) id;

    // FIXME: Pass this error to the caller
    if (event_dma & DMA_EVENT_ABORT) {
    }
    // Expect the SPI IRQ to catch this transfer-done event
    if (event_dma & DMA_EVENT_TRANSFER_DONE) {
        obj->rx_buff.pos = obj->rx_buff.length;
    }
    // FIXME: Pass this error to the caller
    if (event_dma & DMA_EVENT_TIMEOUT) {
    }

    const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
    MBED_ASSERT(modinit != NULL);
    MBED_ASSERT(modinit->modname == obj->spi.spi);

    void (*vec)(void) = (void (*)(void)) NVIC_GetVector(modinit->irq_n);
    vec();
}

#endif

#endif
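Annotation: a minimal sketch of how this HAL is driven from application-level code. The pin names below are placeholders, not verified NANO130 assignments; the real SPI pinout comes from the board's PeripheralPins.c:

    #include "spi_api.h"

    static spi_t spi;

    void spi_sync_demo(void)
    {
        /* Placeholder pins -- look up the actual NANO130 SPI pins in PeripheralPins.c. */
        spi_init(&spi, PB_5, PB_6, PB_7, PB_4);   /* MOSI, MISO, SCLK, SSEL */
        spi_format(&spi, 8, 0, 0);                /* 8-bit frames, mode 0, master */
        spi_frequency(&spi, 1000000);             /* 1 MHz bus clock */

        /* One full-duplex frame: shift out a byte and read back the byte clocked in. */
        int rx = spi_master_write(&spi, 0x9F);
        (void) rx;
    }

The asynchronous path is normally exercised through the mbed SPI driver rather than called directly: the driver hands its callback to spi_master_transfer(), and spi_irq_handler_asynch() reports the event mask back when the transfer terminates.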
@@ -2633,7 +2633,7 @@
         "supported_toolchains": ["ARM", "uARM", "GCC_ARM", "IAR"],
         "inherits": ["Target"],
         "progen": {"target": "numaker-pfm-nano130"},
-        "device_has": ["INTERRUPTIN", "PORTIN", "PORTINOUT", "PORTOUT", "SERIAL", "SERIAL_ASYNCH", "SERIAL_FC", "STDIO_MESSAGES", "I2C", "I2CSLAVE", "I2C_ASYNCH"],
+        "device_has": ["INTERRUPTIN", "PORTIN", "PORTINOUT", "PORTOUT", "SERIAL", "SERIAL_ASYNCH", "SERIAL_FC", "STDIO_MESSAGES", "I2C", "I2CSLAVE", "I2C_ASYNCH", "SPI", "SPISLAVE", "SPI_ASYNCH"],
         "release_versions": ["5"],
         "device_name": "NANO130KE3BN"
     },
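Annotation: the device_has additions are what activate the #if DEVICE_SPI / DEVICE_SPISLAVE / DEVICE_SPI_ASYNCH guards in the new spi_api.c: the mbed build turns each device_has entry into a DEVICE_<NAME>=1 macro when compiling the target. Illustratively:

    /* Effect of "device_has" on the compile (macros are injected by the build system). */
    #define DEVICE_SPI        1
    #define DEVICE_SPISLAVE   1
    #define DEVICE_SPI_ASYNCH 1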