[Nuvoton] Refine SPI code

1. Remove dead code
2. Remove trailing spaces from empty lines
3. Fix compile warnings (see the note below)
4. Fix some comments
pull/6466/head
ccli8 2018-03-26 11:02:54 +08:00
parent 7275ee8626
commit 707de87497
4 changed files with 195 additions and 218 deletions
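
Note on item 3: the compile warnings addressed here come from the module-name asserts, which compared modinit->modname against the SPIName value in obj->spi.spi, a mismatched comparison that some toolchains flag. The fix, repeated across all four files, keeps the check and casts the enum side instead (both lines below are taken verbatim from the hunks that follow):

/* Before: modname compared directly against the SPIName value, flagged by some toolchains. */
MBED_ASSERT(modinit->modname == obj->spi.spi);

/* After: cast to int so the assert keeps its meaning without the warning. */
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);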

View File

@ -112,7 +112,7 @@ static const struct nu_modinit_s spi_modinit_tab[] = {
{SPI_0, SPI0_MODULE, CLK_CLKSEL2_SPI0SEL_PCLK0, MODULE_NoMsk, SPI0_RST, SPI0_IRQn, &spi0_var},
{SPI_1, SPI1_MODULE, CLK_CLKSEL2_SPI1SEL_PCLK1, MODULE_NoMsk, SPI1_RST, SPI1_IRQn, &spi1_var},
{SPI_2, SPI2_MODULE, CLK_CLKSEL2_SPI2SEL_PCLK0, MODULE_NoMsk, SPI2_RST, SPI2_IRQn, &spi2_var},
{NC, 0, 0, 0, 0, (IRQn_Type) 0, NULL}
};
@ -129,33 +129,27 @@ void spi_init(spi_t *obj, PinName mosi, PinName miso, PinName sclk, PinName ssel
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
// Reset this module
SYS_ResetModule(modinit->rsetidx);
// Select IP clock source
CLK_SetModuleClock(modinit->clkidx, modinit->clksrc, modinit->clkdiv);
// Enable IP clock
CLK_EnableModuleClock(modinit->clkidx);
//SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
pinmap_pinout(mosi, PinMap_SPI_MOSI);
pinmap_pinout(miso, PinMap_SPI_MISO);
pinmap_pinout(sclk, PinMap_SPI_SCLK);
pinmap_pinout(ssel, PinMap_SPI_SSEL);
obj->spi.pin_mosi = mosi;
obj->spi.pin_miso = miso;
obj->spi.pin_sclk = sclk;
obj->spi.pin_ssel = ssel;
// Configure the SPI data format and frequency
//spi_format(obj, 8, 0, SPI_MSB); // 8 bits, mode 0
//spi_frequency(obj, 1000000);
#if DEVICE_SPI_ASYNCH
obj->spi.dma_usage = DMA_USAGE_NEVER;
obj->spi.event = 0;
@ -187,29 +181,28 @@ void spi_free(spi_t *obj)
#endif
SPI_Close((SPI_T *) NU_MODBASE(obj->spi.spi));
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
SPI_DisableInt(((SPI_T *) NU_MODBASE(obj->spi.spi)), (SPI_FIFO_RXOV_INT_MASK | SPI_FIFO_RXTH_INT_MASK | SPI_FIFO_TXTH_INT_MASK));
NVIC_DisableIRQ(modinit->irq_n);
// Disable IP clock
CLK_DisableModuleClock(modinit->clkidx);
//((struct nu_spi_var *) modinit->var)->obj = NULL;
// Mark this module to be deinited.
int i = modinit - spi_modinit_tab;
spi_modinit_mask &= ~(1 << i);
}
void spi_format(spi_t *obj, int bits, int mode, int slave)
{
MBED_ASSERT(bits >= NU_SPI_FRAME_MIN && bits <= NU_SPI_FRAME_MAX);
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_DISABLE_SYNC(spi_base);
SPI_Open(spi_base,
@ -223,7 +216,7 @@ void spi_format(spi_t *obj, int bits, int mode, int slave)
if (! slave) {
// Master
if (obj->spi.pin_ssel != NC) {
// Configure SS as low active.
// Configure SS as low active.
SPI_EnableAutoSS(spi_base, SPI_SS, SPI_SS_ACTIVE_LOW);
}
else {
@ -244,7 +237,7 @@ void spi_format(spi_t *obj, int bits, int mode, int slave)
void spi_frequency(spi_t *obj, int hz)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_DISABLE_SYNC(spi_base);
SPI_SetBusClock((SPI_T *) NU_MODBASE(obj->spi.spi), hz);
@ -254,10 +247,10 @@ void spi_frequency(spi_t *obj, int hz)
int spi_master_write(spi_t *obj, int value)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
// NOTE: Data in receive FIFO can be read out via ICE.
SPI_ENABLE_SYNC(spi_base);
// Wait for tx buffer empty
while(! spi_writeable(obj));
SPI_WRITE_TX(spi_base, value);
@ -290,18 +283,18 @@ int spi_master_block_write(spi_t *obj, const char *tx_buffer, int tx_length,
int spi_slave_receive(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_ENABLE_SYNC(spi_base);
return spi_readable(obj);
};
int spi_slave_read(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_ENABLE_SYNC(spi_base);
// Wait for rx buffer full
while (! spi_readable(obj));
int value = SPI_READ_RX(spi_base);
@ -311,9 +304,9 @@ int spi_slave_read(spi_t *obj)
void spi_slave_write(spi_t *obj, int value)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_ENABLE_SYNC(spi_base);
// Wait for tx buffer empty
while(! spi_writeable(obj));
SPI_WRITE_TX(spi_base, value);
@ -323,7 +316,6 @@ void spi_slave_write(spi_t *obj, int value)
#if DEVICE_SPI_ASYNCH
void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx, size_t rx_length, uint8_t bit_width, uint32_t handler, uint32_t event, DMAUsage hint)
{
//MBED_ASSERT(bits >= NU_SPI_FRAME_MIN && bits <= NU_SPI_FRAME_MAX);
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_SET_DATA_WIDTH(spi_base, bit_width);
@ -341,13 +333,13 @@ void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx,
dma_channel_free(obj->spi.dma_chn_id_rx);
obj->spi.dma_chn_id_rx = DMA_ERROR_OUT_OF_CHANNELS;
}
// SPI IRQ is necessary for both interrupt way and DMA way
spi_enable_event(obj, event, 1);
spi_buffer_set(obj, tx, tx_length, rx, rx_length);
SPI_ENABLE_SYNC(spi_base);
if (obj->spi.dma_usage == DMA_USAGE_NEVER) {
// Interrupt way
spi_master_write_asynch(obj, NU_SPI_FIFO_DEPTH / 2);
@ -357,10 +349,10 @@ void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx,
// DMA way
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
PDMA_T *pdma_base = dma_modbase();
// Configure tx DMA
pdma_base->CHCTL |= 1 << obj->spi.dma_chn_id_tx; // Enable this DMA channel
PDMA_SetTransferMode(obj->spi.dma_chn_id_tx,
@ -384,7 +376,7 @@ void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx,
PDMA_INT_TRANS_DONE); // Interrupt type
// Register DMA event handler
dma_set_handler(obj->spi.dma_chn_id_tx, (uint32_t) spi_dma_handler_tx, (uint32_t) obj, DMA_EVENT_ALL);
// Configure rx DMA
pdma_base->CHCTL |= 1 << obj->spi.dma_chn_id_rx; // Enable this DMA channel
PDMA_SetTransferMode(obj->spi.dma_chn_id_rx,
@ -460,37 +452,35 @@ void spi_abort_asynch(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
PDMA_T *pdma_base = dma_modbase();
if (obj->spi.dma_usage != DMA_USAGE_NEVER) {
// Receive FIFO Overrun in case of tx length > rx length on DMA way
if (spi_base->STATUS & SPI_STATUS_RXOVIF_Msk) {
spi_base->STATUS = SPI_STATUS_RXOVIF_Msk;
}
if (obj->spi.dma_chn_id_tx != DMA_ERROR_OUT_OF_CHANNELS) {
PDMA_DisableInt(obj->spi.dma_chn_id_tx, PDMA_INT_TRANS_DONE);
// FIXME: On NUC472, next PDMA transfer will fail with PDMA_STOP() called. Cause is unknown.
//PDMA_STOP(obj->spi.dma_chn_id_tx);
// NOTE: On NUC472, next PDMA transfer will fail with PDMA_STOP() called. Cause is unknown.
pdma_base->CHCTL &= ~(1 << obj->spi.dma_chn_id_tx);
}
SPI_DISABLE_TX_PDMA(((SPI_T *) NU_MODBASE(obj->spi.spi)));
if (obj->spi.dma_chn_id_rx != DMA_ERROR_OUT_OF_CHANNELS) {
PDMA_DisableInt(obj->spi.dma_chn_id_rx, PDMA_INT_TRANS_DONE);
// FIXME: On NUC472, next PDMA transfer will fail with PDMA_STOP() called. Cause is unknown.
//PDMA_STOP(obj->spi.dma_chn_id_rx);
// NOTE: On NUC472, next PDMA transfer will fail with PDMA_STOP() called. Cause is unknown.
pdma_base->CHCTL &= ~(1 << obj->spi.dma_chn_id_rx);
}
SPI_DISABLE_RX_PDMA(((SPI_T *) NU_MODBASE(obj->spi.spi)));
}
// Necessary for both interrupt way and DMA way
spi_enable_vector_interrupt(obj, 0, 0);
spi_master_enable_interrupt(obj, 0);
/* Necessary for accessing FIFOCTL below */
SPI_DISABLE_SYNC(spi_base);
SPI_ClearRxFIFO(spi_base);
SPI_ClearTxFIFO(spi_base);
}
@ -528,7 +518,6 @@ uint8_t spi_active(spi_t *obj)
static int spi_writeable(spi_t * obj)
{
// Receive FIFO must not be full to avoid receive FIFO overflow on next transmit/receive
//return (! SPI_GET_TX_FIFO_FULL_FLAG(((SPI_T *) NU_MODBASE(obj->spi.spi)))) && (SPI_GET_RX_FIFO_COUNT(((SPI_T *) NU_MODBASE(obj->spi.spi))) < NU_SPI_FIFO_DEPTH);
return (! SPI_GET_TX_FIFO_FULL_FLAG(((SPI_T *) NU_MODBASE(obj->spi.spi))));
}
@ -538,7 +527,7 @@ static int spi_readable(spi_t * obj)
}
static void spi_enable_event(spi_t *obj, uint32_t event, uint8_t enable)
{
{
obj->spi.event &= ~SPI_EVENT_ALL;
obj->spi.event |= (event & SPI_EVENT_ALL);
if (event & SPI_EVENT_RX_OVERFLOW) {
@ -550,8 +539,8 @@ static void spi_enable_vector_interrupt(spi_t *obj, uint32_t handler, uint8_t en
{
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
if (enable) {
NVIC_SetVector(modinit->irq_n, handler);
NVIC_EnableIRQ(modinit->irq_n);
@ -563,9 +552,9 @@ static void spi_enable_vector_interrupt(spi_t *obj, uint32_t handler, uint8_t en
}
static void spi_master_enable_interrupt(spi_t *obj, uint8_t enable)
{
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
if (enable) {
// For SPI0, it could be 0 ~ 7. For SPI1 and SPI2, it could be 0 ~ 3.
if (spi_base == (SPI_T *) SPI0_BASE) {
@ -574,7 +563,7 @@ static void spi_master_enable_interrupt(spi_t *obj, uint8_t enable)
else {
SPI_SetFIFO(spi_base, 2, 2);
}
//SPI_SET_SUSPEND_CYCLE(spi_base, 4);
// Enable tx/rx FIFO threshold interrupt
SPI_EnableInt(spi_base, SPI_FIFO_RXTH_INT_MASK | SPI_FIFO_TXTH_INT_MASK);
}
@ -587,16 +576,16 @@ static uint32_t spi_event_check(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
uint32_t event = 0;
if (obj->spi.dma_usage == DMA_USAGE_NEVER) {
uint32_t n_rec = spi_master_read_asynch(obj);
spi_master_write_asynch(obj, n_rec);
}
if (spi_is_tx_complete(obj) && spi_is_rx_complete(obj)) {
event |= SPI_EVENT_COMPLETE;
}
// Receive FIFO Overrun
if (spi_base->STATUS & SPI_STATUS_RXOVIF_Msk) {
spi_base->STATUS = SPI_STATUS_RXOVIF_Msk;
@ -605,7 +594,7 @@ static uint32_t spi_event_check(spi_t *obj)
event |= SPI_EVENT_RX_OVERFLOW;
}
}
// Receive Time-Out
if (spi_base->STATUS & SPI_STATUS_RXTOIF_Msk) {
spi_base->STATUS = SPI_STATUS_RXTOIF_Msk;
@ -616,7 +605,7 @@ static uint32_t spi_event_check(spi_t *obj)
spi_base->STATUS = SPI_STATUS_TXUFIF_Msk;
event |= SPI_EVENT_ERROR;
}
return event;
}
@ -638,7 +627,7 @@ static uint32_t spi_master_write_asynch(spi_t *obj, uint32_t tx_limit)
uint8_t bytes_per_word = (data_width + 7) / 8;
uint8_t *tx = (uint8_t *)(obj->tx_buff.buffer) + bytes_per_word * obj->tx_buff.pos;
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
while ((n_words < max_tx) && spi_writeable(obj)) {
if (spi_is_tx_complete(obj)) {
// Transmit dummy as transmit buffer is empty
@ -659,12 +648,12 @@ static uint32_t spi_master_write_asynch(spi_t *obj, uint32_t tx_limit)
tx += 1;
break;
}
obj->tx_buff.pos ++;
}
n_words ++;
}
//Return the number of words that have been sent
return n_words;
}
@ -690,7 +679,7 @@ static uint32_t spi_master_read_asynch(spi_t *obj)
uint8_t bytes_per_word = (data_width + 7) / 8;
uint8_t *rx = (uint8_t *)(obj->rx_buff.buffer) + bytes_per_word * obj->rx_buff.pos;
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
while ((n_words < max_rx) && spi_readable(obj)) {
if (spi_is_rx_complete(obj)) {
// Disregard as receive buffer is full
@ -714,12 +703,12 @@ static uint32_t spi_master_read_asynch(spi_t *obj)
*rx ++ = SPI_READ_RX(spi_base);
break;
}
obj->rx_buff.pos ++;
}
n_words ++;
}
// Return the number of words received
return n_words;
}
@ -745,12 +734,12 @@ static void spi_check_dma_usage(DMAUsage *dma_usage, int *dma_ch_tx, int *dma_ch
if (*dma_ch_rx == DMA_ERROR_OUT_OF_CHANNELS) {
*dma_ch_rx = dma_channel_allocate(DMA_CAP_NONE);
}
if (*dma_ch_tx == DMA_ERROR_OUT_OF_CHANNELS || *dma_ch_rx == DMA_ERROR_OUT_OF_CHANNELS) {
*dma_usage = DMA_USAGE_NEVER;
}
}
if (*dma_usage == DMA_USAGE_NEVER) {
dma_channel_free(*dma_ch_tx);
*dma_ch_tx = DMA_ERROR_OUT_OF_CHANNELS;
@ -760,22 +749,20 @@ static void spi_check_dma_usage(DMAUsage *dma_usage, int *dma_ch_tx, int *dma_ch
}
static uint8_t spi_get_data_width(spi_t *obj)
{
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
uint32_t data_width = ((spi_base->CTL & SPI_CTL_DWIDTH_Msk) >> SPI_CTL_DWIDTH_Pos);
if (data_width == 0) {
data_width = 32;
}
return data_width;
}
static int spi_is_tx_complete(spi_t *obj)
{
// ???: Exclude tx fifo empty check due to no such interrupt on DMA way
return (obj->tx_buff.pos == obj->tx_buff.length);
//return (obj->tx_buff.pos == obj->tx_buff.length && SPI_GET_TX_FIFO_EMPTY_FLAG(((SPI_T *) NU_MODBASE(obj->spi.spi))));
}
static int spi_is_rx_complete(spi_t *obj)
@ -786,22 +773,22 @@ static int spi_is_rx_complete(spi_t *obj)
static void spi_dma_handler_tx(uint32_t id, uint32_t event_dma)
{
spi_t *obj = (spi_t *) id;
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_ABORT) {
}
// Expect SPI IRQ will catch this transfer done event
if (event_dma & DMA_EVENT_TRANSFER_DONE) {
obj->tx_buff.pos = obj->tx_buff.length;
}
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_TIMEOUT) {
}
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
void (*vec)(void) = (void (*)(void)) NVIC_GetVector(modinit->irq_n);
vec();
}
@ -809,22 +796,22 @@ static void spi_dma_handler_tx(uint32_t id, uint32_t event_dma)
static void spi_dma_handler_rx(uint32_t id, uint32_t event_dma)
{
spi_t *obj = (spi_t *) id;
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_ABORT) {
}
// Expect SPI IRQ will catch this transfer done event
if (event_dma & DMA_EVENT_TRANSFER_DONE) {
obj->rx_buff.pos = obj->rx_buff.length;
}
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_TIMEOUT) {
}
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
void (*vec)(void) = (void (*)(void)) NVIC_GetVector(modinit->irq_n);
vec();
}
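
For context, here is a minimal sketch of how the synchronous entry points touched in this file are driven from the mbed HAL side. The pin names and the include are placeholders for a concrete Nuvoton board, not part of this change; the function signatures are the ones shown above.

#include "spi_api.h"                      /* mbed HAL SPI interface; exact path depends on the tree */

void spi_hal_sync_demo(void)
{
    spi_t obj;

    /* PA_0 .. PA_3 are hypothetical, board-specific pin names. */
    spi_init(&obj, PA_0 /* MOSI */, PA_1 /* MISO */, PA_2 /* SCLK */, PA_3 /* SSEL */);
    spi_format(&obj, 8, 0, 0);            /* 8-bit frames, mode 0, master */
    spi_frequency(&obj, 1000000);         /* 1 MHz bus clock */

    int rx = spi_master_write(&obj, 0x9F);   /* full duplex: send one frame, return the frame received */
    (void) rx;

    spi_free(&obj);
}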

View File

@ -211,6 +211,7 @@ void spi_free(spi_t *obj)
int i = modinit - spi_modinit_tab;
spi_modinit_mask &= ~(1 << i);
}
void spi_format(spi_t *obj, int bits, int mode, int slave)
{
MBED_ASSERT(bits >= NU_SPI_FRAME_MIN && bits <= NU_SPI_FRAME_MAX);
@ -776,14 +777,14 @@ static void spi_dma_handler_tx(uint32_t id, uint32_t event_dma)
{
spi_t *obj = (spi_t *) id;
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_ABORT) {
}
// Expect SPI IRQ will catch this transfer done event
if (event_dma & DMA_EVENT_TRANSFER_DONE) {
obj->tx_buff.pos = obj->tx_buff.length;
}
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_TIMEOUT) {
}
@ -799,14 +800,14 @@ static void spi_dma_handler_rx(uint32_t id, uint32_t event_dma)
{
spi_t *obj = (spi_t *) id;
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_ABORT) {
}
// Expect SPI IRQ will catch this transfer done event
if (event_dma & DMA_EVENT_TRANSFER_DONE) {
obj->rx_buff.pos = obj->rx_buff.length;
}
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_TIMEOUT) {
}

View File

@ -43,8 +43,8 @@ struct nu_spi_var {
#endif
};
// NOTE:
// NANO130: No support for relocating vector table. ISR vector passed into NVIC_SetVector() can only be weak symbol defined in startup_Nano100Series.c.
/* NOTE: NANO130 doesn't support relocating vector table. ISR vector passed into NVIC_SetVector() can
* only be weak symbol defined in startup_Nano100Series.c. */
void SPI0_IRQHandler(void);
void SPI1_IRQHandler(void);
void SPI2_IRQHandler(void);
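
A sketch of what that weak-symbol arrangement means in practice. This is illustrative only, with placeholder names; the actual override bodies and the handler bookkeeping live elsewhere in this file.

/* startup_Nano100Series.c supplies weak defaults for these vectors; this driver
 * provides strong definitions instead of relying on NVIC_SetVector() to relocate
 * the table. The slot name below is an assumption for illustration. */
typedef void (*spi_isr_t)(void);
static spi_isr_t spi0_registered_isr;     /* hypothetical slot filled by spi_enable_vector_interrupt() */

void SPI0_IRQHandler(void)                /* strong definition replaces the weak startup symbol */
{
    if (spi0_registered_isr) {
        spi0_registered_isr();            /* forward to whatever handler the HAL registered for SPI_0 */
    }
}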
@ -156,27 +156,27 @@ void spi_init(spi_t *obj, PinName mosi, PinName miso, PinName sclk, PinName ssel
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT((SPIName) modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
// Reset this module
SYS_ResetModule(modinit->rsetidx);
// Select IP clock source
CLK_SetModuleClock(modinit->clkidx, modinit->clksrc, modinit->clkdiv);
// Enable IP clock
CLK_EnableModuleClock(modinit->clkidx);
pinmap_pinout(mosi, PinMap_SPI_MOSI);
pinmap_pinout(miso, PinMap_SPI_MISO);
pinmap_pinout(sclk, PinMap_SPI_SCLK);
pinmap_pinout(ssel, PinMap_SPI_SSEL);
obj->spi.pin_mosi = mosi;
obj->spi.pin_miso = miso;
obj->spi.pin_sclk = sclk;
obj->spi.pin_ssel = ssel;
#if DEVICE_SPI_ASYNCH
obj->spi.dma_usage = DMA_USAGE_NEVER;
obj->spi.event = 0;
@ -213,26 +213,28 @@ void spi_free(spi_t *obj)
#endif
SPI_Close((SPI_T *) NU_MODBASE(obj->spi.spi));
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT((SPIName) modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
SPI_DisableInt(((SPI_T *) NU_MODBASE(obj->spi.spi)), (SPI_FIFO_RXOVR_INTEN_MASK | SPI_FIFO_RX_INTEN_MASK | SPI_FIFO_TX_INTEN_MASK));
NVIC_DisableIRQ(modinit->irq_n);
// Disable IP clock
CLK_DisableModuleClock(modinit->clkidx);
// Mark this module to be deinited.
int i = modinit - spi_modinit_tab;
spi_modinit_mask &= ~(1 << i);
}
void spi_format(spi_t *obj, int bits, int mode, int slave)
{
MBED_ASSERT(bits >= NU_SPI_FRAME_MIN && bits <= NU_SPI_FRAME_MAX);
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_DISABLE_SYNC(spi_base);
SPI_Open(spi_base,
@ -295,7 +297,7 @@ void spi_frequency(spi_t *obj, int hz)
int spi_master_write(spi_t *obj, int value)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
// NOTE: Data in receive FIFO can be read out via ICE.
// NOTE:
// NUC472/M453/M487: SPI_CTL.SPIEN is controlled by software (in FIFO mode).
@ -306,7 +308,7 @@ int spi_master_write(spi_t *obj, int value)
while(! spi_writeable(obj));
uint32_t TX = (NU_MODSUBINDEX(obj->spi.spi) == 0) ? ((uint32_t) &spi_base->TX0) : ((uint32_t) &spi_base->TX1);
M32(TX) = value;
// Wait for rx buffer full
while (! spi_readable(obj));
uint32_t RX = (NU_MODSUBINDEX(obj->spi.spi) == 0) ? ((uint32_t) &spi_base->RX0) : ((uint32_t) &spi_base->RX1);
@ -389,7 +391,7 @@ void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx,
dma_channel_free(obj->spi.dma_chn_id_rx);
obj->spi.dma_chn_id_rx = DMA_ERROR_OUT_OF_CHANNELS;
}
// SPI IRQ is necessary for both interrupt way and DMA way
spi_enable_event(obj, event, 1);
spi_buffer_set(obj, tx, tx_length, rx, rx_length);
@ -405,7 +407,7 @@ void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx,
// DMA way
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT((SPIName) modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
// Configure tx DMA
dma_enable(obj->spi.dma_chn_id_tx, 1); // Enable this DMA channel
@ -497,7 +499,7 @@ void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx,
void spi_abort_asynch(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
if (obj->spi.dma_usage != DMA_USAGE_NEVER) {
// Receive FIFO Overrun in case of tx length > rx length on DMA way
if (spi_base->STATUS & SPI_STATUS_RX_OVER_RUN_Msk) {
@ -509,18 +511,16 @@ void spi_abort_asynch(spi_t *obj)
// NOTE: On NUC472, next PDMA transfer will fail with PDMA_STOP() called.
dma_enable(obj->spi.dma_chn_id_tx, 0);
}
//SPI_DISABLE_TX_PDMA(((SPI_T *) NU_MODBASE(obj->spi.spi)));
spi_base->DMA &= ~SPI_DMA_TX_DMA_EN_Msk;
if (obj->spi.dma_chn_id_rx != DMA_ERROR_OUT_OF_CHANNELS) {
PDMA_DisableInt(obj->spi.dma_chn_id_rx, PDMA_IER_TD_IE_Msk);
// NOTE: On NUC472, next PDMA transfer will fail with PDMA_STOP() called.
dma_enable(obj->spi.dma_chn_id_rx, 0);
}
//SPI_DISABLE_RX_PDMA(((SPI_T *) NU_MODBASE(obj->spi.spi)));
spi_base->DMA &= ~SPI_DMA_RX_DMA_EN_Msk;
}
// Necessary for both interrupt way and DMA way
spi_enable_vector_interrupt(obj, 0, 0);
spi_master_enable_interrupt(obj, 0, SPI_FIFO_RX_INTEN_MASK | SPI_FIFO_TX_INTEN_MASK);
@ -599,7 +599,7 @@ static int spi_readable(spi_t * obj)
}
static void spi_enable_event(spi_t *obj, uint32_t event, uint8_t enable)
{
{
obj->spi.event &= ~SPI_EVENT_ALL;
obj->spi.event |= (event & SPI_EVENT_ALL);
if (event & SPI_EVENT_RX_OVERFLOW) {
@ -611,7 +611,7 @@ static void spi_enable_vector_interrupt(spi_t *obj, uint32_t handler, uint8_t en
{
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT((SPIName) modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
struct nu_spi_var *var = (struct nu_spi_var *) modinit->var;
@ -654,16 +654,16 @@ static uint32_t spi_event_check(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
uint32_t event = 0;
if (obj->spi.dma_usage == DMA_USAGE_NEVER) {
uint32_t n_rec = spi_master_read_asynch(obj);
spi_master_write_asynch(obj, n_rec);
}
if (spi_is_tx_complete(obj) && spi_is_rx_complete(obj)) {
event |= SPI_EVENT_COMPLETE;
}
// Receive FIFO Overrun
if (spi_base->STATUS & SPI_STATUS_RX_OVER_RUN_Msk) {
spi_base->STATUS = SPI_STATUS_RX_OVER_RUN_Msk;
@ -672,10 +672,11 @@ static uint32_t spi_event_check(spi_t *obj)
event |= SPI_EVENT_RX_OVERFLOW;
}
}
// Receive Time-Out
if (spi_base->STATUS & SPI_STATUS_TIME_OUT_STS_Msk) {
spi_base->STATUS = SPI_STATUS_TIME_OUT_STS_Msk;
// Not using this IF. Just clear it.
}
return event;
@ -721,12 +722,12 @@ static uint32_t spi_master_write_asynch(spi_t *obj, uint32_t tx_limit)
tx += 1;
break;
}
obj->tx_buff.pos ++;
}
n_words ++;
}
//Return the number of words that have been sent
return n_words;
}
@ -777,12 +778,12 @@ static uint32_t spi_master_read_asynch(spi_t *obj)
*rx ++ = M32(RX);
break;
}
obj->rx_buff.pos ++;
}
n_words ++;
}
// Return the number of words received
return n_words;
}
@ -808,12 +809,12 @@ static void spi_check_dma_usage(DMAUsage *dma_usage, int *dma_ch_tx, int *dma_ch
if (*dma_ch_rx == DMA_ERROR_OUT_OF_CHANNELS) {
*dma_ch_rx = dma_channel_allocate(DMA_CAP_NONE);
}
if (*dma_ch_tx == DMA_ERROR_OUT_OF_CHANNELS || *dma_ch_rx == DMA_ERROR_OUT_OF_CHANNELS) {
*dma_usage = DMA_USAGE_NEVER;
}
}
if (*dma_usage == DMA_USAGE_NEVER) {
dma_channel_free(*dma_ch_tx);
*dma_ch_tx = DMA_ERROR_OUT_OF_CHANNELS;
@ -825,12 +826,12 @@ static void spi_check_dma_usage(DMAUsage *dma_usage, int *dma_ch_tx, int *dma_ch
static uint8_t spi_get_data_width(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
uint32_t data_width = ((spi_base->CTL & SPI_CTL_TX_BIT_LEN_Msk) >> SPI_CTL_TX_BIT_LEN_Pos);
if (data_width == 0) {
data_width = 32;
}
return data_width;
}
@ -847,7 +848,7 @@ static int spi_is_rx_complete(spi_t *obj)
static void spi_dma_handler_tx(uint32_t id, uint32_t event_dma)
{
spi_t *obj = (spi_t *) id;
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_ABORT) {
}
@ -858,11 +859,11 @@ static void spi_dma_handler_tx(uint32_t id, uint32_t event_dma)
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_TIMEOUT) {
}
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT((SPIName) modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
void (*vec)(void) = (void (*)(void)) NVIC_GetVector(modinit->irq_n);
vec();
}
@ -870,7 +871,7 @@ static void spi_dma_handler_tx(uint32_t id, uint32_t event_dma)
static void spi_dma_handler_rx(uint32_t id, uint32_t event_dma)
{
spi_t *obj = (spi_t *) id;
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_ABORT) {
}
@ -881,11 +882,11 @@ static void spi_dma_handler_rx(uint32_t id, uint32_t event_dma)
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_TIMEOUT) {
}
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT((SPIName) modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
void (*vec)(void) = (void (*)(void)) NVIC_GetVector(modinit->irq_n);
vec();
}
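
One detail the asynchronous read/write loops in these drivers rely on: each FIFO access moves exactly one SPI frame, and the user buffer advances by a whole number of bytes derived from the configured data width (spi_get_data_width() above, where a field value of 0 means 32 bits). A small sketch of that rule, with an illustrative helper name; the drivers compute it inline.

/* Bytes consumed from the tx/rx buffer per SPI frame: one byte for 8-bit frames,
 * two for 16-bit, four for 32-bit, i.e. the same (data_width + 7) / 8 the loops
 * use before advancing tx_buff.pos / rx_buff.pos. */
static uint8_t spi_bytes_per_word(uint8_t data_width)
{
    return (uint8_t) ((data_width + 7) / 8);
}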

View File

@ -136,30 +136,25 @@ void spi_init(spi_t *obj, PinName mosi, PinName miso, PinName sclk, PinName ssel
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
// Reset this module
SYS_ResetModule(modinit->rsetidx);
// Enable IP clock
CLK_EnableModuleClock(modinit->clkidx);
//SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
pinmap_pinout(mosi, PinMap_SPI_MOSI);
pinmap_pinout(miso, PinMap_SPI_MISO);
pinmap_pinout(sclk, PinMap_SPI_SCLK);
pinmap_pinout(ssel, PinMap_SPI_SSEL);
obj->spi.pin_mosi = mosi;
obj->spi.pin_miso = miso;
obj->spi.pin_sclk = sclk;
obj->spi.pin_ssel = ssel;
// Configure the SPI data format and frequency
//spi_format(obj, 8, 0, SPI_MSB); // 8 bits, mode 0
//spi_frequency(obj, 1000000);
#if DEVICE_SPI_ASYNCH
obj->spi.dma_usage = DMA_USAGE_NEVER;
obj->spi.event = 0;
@ -191,27 +186,26 @@ void spi_free(spi_t *obj)
#endif
SPI_Close((SPI_T *) NU_MODBASE(obj->spi.spi));
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
SPI_DisableInt(((SPI_T *) NU_MODBASE(obj->spi.spi)), (SPI_FIFO_RXOVIEN_MASK | SPI_FIFO_RXTHIEN_MASK | SPI_FIFO_TXTHIEN_MASK));
NVIC_DisableIRQ(modinit->irq_n);
// Disable IP clock
CLK_DisableModuleClock(modinit->clkidx);
//((struct nu_spi_var *) modinit->var)->obj = NULL;
// Mark this module to be deinited.
int i = modinit - spi_modinit_tab;
spi_modinit_mask &= ~(1 << i);
}
void spi_format(spi_t *obj, int bits, int mode, int slave)
{
MBED_ASSERT(bits >= NU_SPI_FRAME_MIN && bits <= NU_SPI_FRAME_MAX);
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_DISABLE_SYNC(spi_base);
@ -223,11 +217,11 @@ void spi_format(spi_t *obj, int bits, int mode, int slave)
SPI_GetBusClock(spi_base));
// NOTE: Hardcode to be MSB first.
SPI_SET_MSB_FIRST(spi_base);
if (! slave) {
// Master
if (obj->spi.pin_ssel != NC) {
// Configure SS as low active.
// Configure SS as low active.
SPI_EnableAutoSS(spi_base, SPI_SS0, SPI_SS_ACTIVE_LOW);
// NOTE: In NUC472 series, all SPI SS pins are SS0, so we can hardcode SS0 here.
}
@ -260,10 +254,10 @@ void spi_frequency(spi_t *obj, int hz)
int spi_master_write(spi_t *obj, int value)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
// NOTE: Data in receive FIFO can be read out via ICE.
SPI_ENABLE_SYNC(spi_base);
// Wait for tx buffer empty
while(! spi_writeable(obj));
SPI_WRITE_TX(spi_base, value);
@ -296,18 +290,18 @@ int spi_master_block_write(spi_t *obj, const char *tx_buffer, int tx_length,
int spi_slave_receive(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_ENABLE_SYNC(spi_base);
return spi_readable(obj);
};
int spi_slave_read(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_ENABLE_SYNC(spi_base);
// Wait for rx buffer full
while (! spi_readable(obj));
int value = SPI_READ_RX(spi_base);
@ -317,9 +311,9 @@ int spi_slave_read(spi_t *obj)
void spi_slave_write(spi_t *obj, int value)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_ENABLE_SYNC(spi_base);
// Wait for tx buffer empty
while(! spi_writeable(obj));
SPI_WRITE_TX(spi_base, value);
@ -329,10 +323,9 @@ void spi_slave_write(spi_t *obj, int value)
#if DEVICE_SPI_ASYNCH
void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx, size_t rx_length, uint8_t bit_width, uint32_t handler, uint32_t event, DMAUsage hint)
{
//MBED_ASSERT(bits >= NU_SPI_FRAME_MIN && bits <= NU_SPI_FRAME_MAX);
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
SPI_SET_DATA_WIDTH(spi_base, bit_width);
obj->spi.dma_usage = hint;
spi_check_dma_usage(&obj->spi.dma_usage, &obj->spi.dma_chn_id_tx, &obj->spi.dma_chn_id_rx);
uint32_t data_width = spi_get_data_width(obj);
@ -347,13 +340,13 @@ void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx,
dma_channel_free(obj->spi.dma_chn_id_rx);
obj->spi.dma_chn_id_rx = DMA_ERROR_OUT_OF_CHANNELS;
}
// SPI IRQ is necessary for both interrupt way and DMA way
spi_enable_event(obj, event, 1);
spi_buffer_set(obj, tx, tx_length, rx, rx_length);
SPI_ENABLE_SYNC(spi_base);
if (obj->spi.dma_usage == DMA_USAGE_NEVER) {
// Interrupt way
spi_master_write_asynch(obj, NU_SPI_FIFO_DEPTH / 2);
@ -363,10 +356,10 @@ void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx,
// DMA way
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
PDMA_T *pdma_base = dma_modbase();
// Configure tx DMA
pdma_base->CHCTL |= 1 << obj->spi.dma_chn_id_tx; // Enable this DMA channel
PDMA_SetTransferMode(obj->spi.dma_chn_id_tx,
@ -388,7 +381,7 @@ void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx,
0); // Interrupt type. No use here
// Register DMA event handler
dma_set_handler(obj->spi.dma_chn_id_tx, (uint32_t) spi_dma_handler_tx, (uint32_t) obj, DMA_EVENT_ALL);
// Configure rx DMA
pdma_base->CHCTL |= 1 << obj->spi.dma_chn_id_rx; // Enable this DMA channel
PDMA_SetTransferMode(obj->spi.dma_chn_id_rx,
@ -462,37 +455,35 @@ void spi_abort_asynch(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
PDMA_T *pdma_base = dma_modbase();
if (obj->spi.dma_usage != DMA_USAGE_NEVER) {
// Receive FIFO Overrun in case of tx length > rx length on DMA way
if (spi_base->STATUS & SPI_STATUS_RXOVIF_Msk) {
spi_base->STATUS = SPI_STATUS_RXOVIF_Msk;
}
if (obj->spi.dma_chn_id_tx != DMA_ERROR_OUT_OF_CHANNELS) {
PDMA_DisableInt(obj->spi.dma_chn_id_tx, 0);
// FIXME: Next PDMA transfer will fail with PDMA_STOP() called. Cause is unknown.
//PDMA_STOP(obj->spi.dma_chn_id_tx);
// NOTE: On NUC472, next PDMA transfer will fail with PDMA_STOP() called. Cause is unknown.
pdma_base->CHCTL &= ~(1 << obj->spi.dma_chn_id_tx);
}
SPI_DISABLE_TX_PDMA(((SPI_T *) NU_MODBASE(obj->spi.spi)));
if (obj->spi.dma_chn_id_rx != DMA_ERROR_OUT_OF_CHANNELS) {
PDMA_DisableInt(obj->spi.dma_chn_id_rx, 0);
// FIXME: Next PDMA transfer will fail with PDMA_STOP() called. Cause is unknown.
//PDMA_STOP(obj->spi.dma_chn_id_rx);
// NOTE: On NUC472, next PDMA transfer will fail with PDMA_STOP() called. Cause is unknown.
pdma_base->CHCTL &= ~(1 << obj->spi.dma_chn_id_rx);
}
SPI_DISABLE_RX_PDMA(((SPI_T *) NU_MODBASE(obj->spi.spi)));
}
// Necessary for both interrupt way and DMA way
spi_enable_vector_interrupt(obj, 0, 0);
spi_master_enable_interrupt(obj, 0);
/* Necessary for accessing FIFOCTL below */
SPI_DISABLE_SYNC(spi_base);
SPI_ClearRxFIFO(spi_base);
SPI_ClearTxFIFO(spi_base);
}
@ -530,7 +521,6 @@ uint8_t spi_active(spi_t *obj)
static int spi_writeable(spi_t * obj)
{
// Receive FIFO must not be full to avoid receive FIFO overflow on next transmit/receive
//return (! SPI_GET_TX_FIFO_FULL_FLAG(((SPI_T *) NU_MODBASE(obj->spi.spi)))) && (SPI_GET_RX_FIFO_COUNT(((SPI_T *) NU_MODBASE(obj->spi.spi))) < NU_SPI_FIFO_DEPTH);
return (! SPI_GET_TX_FIFO_FULL_FLAG(((SPI_T *) NU_MODBASE(obj->spi.spi))));
}
@ -540,7 +530,7 @@ static int spi_readable(spi_t * obj)
}
static void spi_enable_event(spi_t *obj, uint32_t event, uint8_t enable)
{
{
obj->spi.event &= ~SPI_EVENT_ALL;
obj->spi.event |= (event & SPI_EVENT_ALL);
if (event & SPI_EVENT_RX_OVERFLOW) {
@ -552,8 +542,8 @@ static void spi_enable_vector_interrupt(spi_t *obj, uint32_t handler, uint8_t en
{
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
if (enable) {
NVIC_SetVector(modinit->irq_n, handler);
NVIC_EnableIRQ(modinit->irq_n);
@ -565,12 +555,12 @@ static void spi_enable_vector_interrupt(spi_t *obj, uint32_t handler, uint8_t en
}
static void spi_master_enable_interrupt(spi_t *obj, uint8_t enable)
{
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
if (enable) {
SPI_SetFIFOThreshold(spi_base, 4, 4);
//SPI_SET_SUSPEND_CYCLE(spi_base, 4);
// Enable tx/rx FIFO threshold interrupt
SPI_EnableInt(spi_base, SPI_FIFO_RXTHIEN_MASK | SPI_FIFO_TXTHIEN_MASK);
}
@ -583,16 +573,16 @@ static uint32_t spi_event_check(spi_t *obj)
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
uint32_t event = 0;
if (obj->spi.dma_usage == DMA_USAGE_NEVER) {
uint32_t n_rec = spi_master_read_asynch(obj);
spi_master_write_asynch(obj, n_rec);
}
if (spi_is_tx_complete(obj) && spi_is_rx_complete(obj)) {
event |= SPI_EVENT_COMPLETE;
}
// Receive FIFO Overrun
if (spi_base->STATUS & SPI_STATUS_RXOVIF_Msk) {
spi_base->STATUS = SPI_STATUS_RXOVIF_Msk;
@ -601,7 +591,7 @@ static uint32_t spi_event_check(spi_t *obj)
event |= SPI_EVENT_RX_OVERFLOW;
}
}
// Receive Time-Out
if (spi_base->STATUS & SPI_STATUS_RXTOIF_Msk) {
spi_base->STATUS = SPI_STATUS_RXTOIF_Msk;
@ -612,7 +602,7 @@ static uint32_t spi_event_check(spi_t *obj)
spi_base->STATUS = SPI_STATUS_TXUFIF_Msk;
event |= SPI_EVENT_ERROR;
}
return event;
}
@ -634,7 +624,7 @@ static uint32_t spi_master_write_asynch(spi_t *obj, uint32_t tx_limit)
uint8_t bytes_per_word = (data_width + 7) / 8;
uint8_t *tx = (uint8_t *)(obj->tx_buff.buffer) + bytes_per_word * obj->tx_buff.pos;
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
while ((n_words < max_tx) && spi_writeable(obj)) {
if (spi_is_tx_complete(obj)) {
// Transmit dummy as transmit buffer is empty
@ -655,12 +645,12 @@ static uint32_t spi_master_write_asynch(spi_t *obj, uint32_t tx_limit)
tx += 1;
break;
}
obj->tx_buff.pos ++;
}
n_words ++;
}
//Return the number of words that have been sent
return n_words;
}
@ -686,7 +676,7 @@ static uint32_t spi_master_read_asynch(spi_t *obj)
uint8_t bytes_per_word = (data_width + 7) / 8;
uint8_t *rx = (uint8_t *)(obj->rx_buff.buffer) + bytes_per_word * obj->rx_buff.pos;
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
while ((n_words < max_rx) && spi_readable(obj)) {
if (spi_is_rx_complete(obj)) {
// Disregard as receive buffer is full
@ -710,12 +700,12 @@ static uint32_t spi_master_read_asynch(spi_t *obj)
*rx ++ = SPI_READ_RX(spi_base);
break;
}
obj->rx_buff.pos ++;
}
n_words ++;
}
// Return the number of words received
return n_words;
}
@ -741,12 +731,12 @@ static void spi_check_dma_usage(DMAUsage *dma_usage, int *dma_ch_tx, int *dma_ch
if (*dma_ch_rx == DMA_ERROR_OUT_OF_CHANNELS) {
*dma_ch_rx = dma_channel_allocate(DMA_CAP_NONE);
}
if (*dma_ch_tx == DMA_ERROR_OUT_OF_CHANNELS || *dma_ch_rx == DMA_ERROR_OUT_OF_CHANNELS) {
*dma_usage = DMA_USAGE_NEVER;
}
}
if (*dma_usage == DMA_USAGE_NEVER) {
dma_channel_free(*dma_ch_tx);
*dma_ch_tx = DMA_ERROR_OUT_OF_CHANNELS;
@ -756,22 +746,20 @@ static void spi_check_dma_usage(DMAUsage *dma_usage, int *dma_ch_tx, int *dma_ch
}
static uint8_t spi_get_data_width(spi_t *obj)
{
{
SPI_T *spi_base = (SPI_T *) NU_MODBASE(obj->spi.spi);
uint32_t data_width = ((spi_base->CTL & SPI_CTL_DWIDTH_Msk) >> SPI_CTL_DWIDTH_Pos);
if (data_width == 0) {
data_width = 32;
}
return data_width;
}
static int spi_is_tx_complete(spi_t *obj)
{
// ???: Exclude tx fifo empty check due to no such interrupt on DMA way
return (obj->tx_buff.pos == obj->tx_buff.length);
//return (obj->tx_buff.pos == obj->tx_buff.length && SPI_GET_TX_FIFO_EMPTY_FLAG(((SPI_T *) NU_MODBASE(obj->spi.spi))));
}
static int spi_is_rx_complete(spi_t *obj)
@ -782,22 +770,22 @@ static int spi_is_rx_complete(spi_t *obj)
static void spi_dma_handler_tx(uint32_t id, uint32_t event_dma)
{
spi_t *obj = (spi_t *) id;
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_ABORT) {
}
// Expect SPI IRQ will catch this transfer done event
if (event_dma & DMA_EVENT_TRANSFER_DONE) {
obj->tx_buff.pos = obj->tx_buff.length;
}
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_TIMEOUT) {
}
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
void (*vec)(void) = (void (*)(void)) NVIC_GetVector(modinit->irq_n);
vec();
}
@ -805,22 +793,22 @@ static void spi_dma_handler_tx(uint32_t id, uint32_t event_dma)
static void spi_dma_handler_rx(uint32_t id, uint32_t event_dma)
{
spi_t *obj = (spi_t *) id;
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_ABORT) {
}
// Expect SPI IRQ will catch this transfer done event
if (event_dma & DMA_EVENT_TRANSFER_DONE) {
obj->rx_buff.pos = obj->rx_buff.length;
}
// FIXME: Pass this error to caller
// TODO: Pass this error to caller
if (event_dma & DMA_EVENT_TIMEOUT) {
}
const struct nu_modinit_s *modinit = get_modinit(obj->spi.spi, spi_modinit_tab);
MBED_ASSERT(modinit != NULL);
MBED_ASSERT(modinit->modname == obj->spi.spi);
MBED_ASSERT(modinit->modname == (int) obj->spi.spi);
void (*vec)(void) = (void (*)(void)) NVIC_GetVector(modinit->irq_n);
vec();
}
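
Finally, a minimal sketch of driving the asynchronous path these changes refine. The buffers and the ISR body are illustrative, and the object is assumed to have been set up with spi_init()/spi_format()/spi_frequency() first; the spi_master_transfer() signature and the event flags are the ones used in the files above.

static spi_t g_spi;                       /* assumed already initialised via spi_init()/spi_format() */
static uint8_t g_tx[16];
static uint8_t g_rx[16];

static void spi_async_done_isr(void)
{
    /* In mbed, the C++ SPI driver's asynch IRQ handler runs here and collects the
     * event from spi_irq_handler_asynch(); left empty in this sketch. */
}

void spi_async_demo(void)
{
    spi_master_transfer(&g_spi,
                        g_tx, sizeof(g_tx),             /* tx buffer and word count (8-bit words) */
                        g_rx, sizeof(g_rx),             /* rx buffer and word count */
                        8,                              /* bit width per frame */
                        (uint32_t) spi_async_done_isr,  /* installed through spi_enable_vector_interrupt() */
                        SPI_EVENT_COMPLETE | SPI_EVENT_ERROR,
                        DMA_USAGE_NEVER);               /* interrupt way; spi_check_dma_usage() honours this */
}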