diff --git a/drivers/include/drivers/SPI.h b/drivers/include/drivers/SPI.h
index e8809b9c6c..69df632d33 100644
--- a/drivers/include/drivers/SPI.h
+++ b/drivers/include/drivers/SPI.h
@@ -119,7 +119,7 @@ const use_gpio_ssel_t use_gpio_ssel;
  *
  *
  * <h1>Sharing a Bus</h1>
  *
  * Multiple %SPI devices may share the same physical bus, so long as each has its own dedicated chip select
- * (CS) pin. To implement this sharing, each chip should create its own instance of the SPI class, passing
+ * (CS) pin. To implement this sharing, each chip's driver should create its own instance of the SPI class, passing
  * the same MOSI, MISO, and SCLK pins but a different CS pin. Mbed OS will internally share the %SPI hardware
  * between these objects. Note that this is completely different from how the I2C class handles
  * sharing.

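The bus-sharing rule above is easiest to see in code. A minimal sketch, assuming a target with two chips on one bus; the pin names (PA_7/PA_6/PA_5/PA_4/PB_0) and the command bytes are placeholders, not part of this patch:

    #include "mbed.h"

    // One physical bus (same MOSI/MISO/SCLK), one SPI object per chip, each with its own CS pin.
    SPI flashSpi(PA_7, PA_6, PA_5, PA_4, use_gpio_ssel);    // external flash, CS on PA_4
    SPI sensorSpi(PA_7, PA_6, PA_5, PB_0, use_gpio_ssel);   // sensor, CS on PB_0

    int main()
    {
        flashSpi.frequency(10000000);      // each object keeps its own frequency/format settings
        sensorSpi.frequency(1000000);

        flashSpi.write(0x9F);              // Mbed OS arbitrates the shared peripheral internally
        sensorSpi.write(0x75);
    }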
@@ -133,9 +133,8 @@ const use_gpio_ssel_t use_gpio_ssel;
  *
  *
  * The frame size controls the effective width of data written and read from the chip. For example, if you set
  * frame size to 8, SPI::write(int) will take one byte and return one byte, but if you set it to 16, SPI::write(int)
- * will take a 16 bit value and return a 16 bit value. You can also do complete transactions
- * (e.g. SPI::write(const char *, int, char *, int)) with frame sizes other than 8. Just be sure to pass the
- * length in bytes, not words!
+ * will take a 16 bit value and return a 16 bit value. You can also do transactions with frame sizes other
+ * than 8. Just be sure to pass the length in bytes, not words!
  *
  *
  * It should be noted that changing the frame size can perform an apparent "endian swap" on data being
  * transmitted. For example, suppose you have the 32-bit integer 0x01020408. On a little-endian processor,
@@ -212,7 +211,7 @@ const use_gpio_ssel_t use_gpio_ssel;
  *
  * This code will cause the data in \c command to be sent to the device and the response to be received into
  * \c response . During the transfer, the current thread is paused, but other threads can execute.
  * The non-blocking API does not pause the current thread, but is a bit more complicated to use.
- * See the SPI::transfer_and_wait() implementation in the header file for an example.
+ * See the SPI::transfer_and_wait() implementation in SPI.cpp for an example.
  *
  *

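To make the frame-size and byte-length rules above concrete, here is a minimal usage sketch, assuming a target that supports 16-bit frames; the pins and the "read ID" register layout are made up for illustration:

    #include "mbed.h"

    SPI spi(PA_7, PA_6, PA_5, PA_4, use_gpio_ssel);   // hypothetical pins

    int main()
    {
        spi.format(16, 0);                            // 16-bit frames, SPI mode 0

        // write(int) now takes and returns one 16-bit frame.
        int id = spi.write(0x8F00);                   // made-up "read ID" command
        printf("ID word: 0x%04X\n", id);

        // Transaction lengths are still given in BYTES, not 16-bit words:
        const uint16_t command[2] = {0x8F00, 0x0000};
        uint16_t response[2] = {0, 0};
        spi.write(command, sizeof(command), response, sizeof(response));  // 4 bytes each
    }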
  * <h1>Async: DMA vs Interrupts</h1>
  *

  * Some processors only provide asynchronous %SPI via interrupts, some only support DMA, and some offer both.
@@ -357,27 +356,6 @@ public:
      */
     virtual int write(int value);

-    /**
-     * @brief Write to the SPI Slave and obtain the response.
-     *
-     * The total number of bytes sent and received will be the maximum of
-     * tx_length and rx_length. The bytes written will be padded with the
-     * value 0xff.
-     *
-     * Note: Even if the word size / bits per frame is not 8, \c rx_length and \c tx_length
-     * still count bytes of input data, not numbers of words.
-     *
-     * @param tx_buffer Pointer to the byte-array of data to write to the device.
-     * @param tx_length Number of bytes to write, may be zero.
-     * @param rx_buffer Pointer to the byte-array of data to read from the device.
-     * @param rx_length Number of bytes to read, may be zero.
-     * @return
-     *      The number of bytes written and read from the device. This is
-     *      maximum of tx_length and rx_length.
-     */
-    virtual int write(const char *tx_buffer, int tx_length, char *rx_buffer, int rx_length);
-
-    // Convenience overload of the above for any integer type instead of char*
     /**
      * @brief Write to the SPI Slave and obtain the response.
      *
@@ -400,25 +378,64 @@ public:
     typename std::enable_if<std::is_integral<WordT>::value, int>::type
     write(const WordT *tx_buffer, int tx_length, WordT *rx_buffer, int rx_length)
     {
-        return write(reinterpret_cast<const char *>(tx_buffer), tx_length, reinterpret_cast<char *>(rx_buffer), rx_length);
+        return write_internal(reinterpret_cast<const char *>(tx_buffer), tx_length, reinterpret_cast<char *>(rx_buffer), rx_length);
     }

-    /** Acquire exclusive access to this SPI bus.
+    // Overloads of the above to support passing nullptr
+    template<typename WordT>
+    typename std::enable_if<std::is_integral<WordT>::value, int>::type
+    write(const std::nullptr_t *tx_buffer, int tx_length, WordT *rx_buffer, int rx_length)
+    {
+        return write_internal(reinterpret_cast<const char *>(tx_buffer), tx_length, reinterpret_cast<char *>(rx_buffer), rx_length);
+    }
+
+    template<typename WordT>
+    typename std::enable_if<std::is_integral<WordT>::value, int>::type
+    write(const WordT *tx_buffer, int tx_length, std::nullptr_t *rx_buffer, int rx_length)
+    {
+        return write_internal(reinterpret_cast<const char *>(tx_buffer), tx_length, reinterpret_cast<char *>(rx_buffer), rx_length);
+    }
+
+    /**
+     * @brief Acquire exclusive access to this SPI bus.
+     *
+     * This function blocks until the chosen SPI peripheral is not being used by any other SPI objects.
+     * Careful -- if other code leaves the bus locked, this could block forever!
      */
     virtual void lock(void);

-    /** Release exclusive access to this SPI bus.
+    /**
+     * @brief Release exclusive access to this SPI bus.
+     *
+     * This allows other code to do operations using the SPI peripheral.
      */
     virtual void unlock(void);

-    /** Assert the Slave Select line, acquiring exclusive access to this SPI bus.
+    /**
+     * @brief Assert the Slave Select line and acquire exclusive access to this SPI bus.
      *
-     * If use_gpio_ssel was not passed to the constructor, this only acquires
-     * exclusive access; the Slave Select line will not activate until data is transferred.
+     * The slave select line will remain selected (low) for all following operations until
+     * you call #deselect() on this instance. This allows you to string together multiple SPI transactions
+     * as if they were a single operation (from the perspective of peripheral chips).
+     *
+     * If use_gpio_ssel was not passed to the constructor, manual control of the SSEL line is not possible,
+     * and this function behaves identically to #lock().
+     *
+     * Like #lock(), this function will block until exclusive access can be acquired.
+     *
+     * \warning Do not call this function while an asynchronous transfer is in progress,
+     * as undefined behavior can occur.
      */
     void select(void);

-    /** Deassert the Slave Select line, releasing exclusive access to this SPI bus.
+    /**
+     * @brief Deassert the Slave Select line, releasing exclusive access to this SPI bus.
+     *
+     * If use_gpio_ssel was not passed to the constructor, manual control of the SSEL line is not possible,
+     * and this function behaves identically to #unlock().
+     *
+     * \warning Do not call this function while an asynchronous transfer is in progress,
+     * as undefined behavior can occur.
      */
     void deselect(void);

@@ -434,12 +451,12 @@ public:
 #if DEVICE_SPI_ASYNCH
     /**
-     * @brief Start non-blocking SPI transfer.
+     * @brief Start non-blocking %SPI transfer.
      *
      * This function locks the deep sleep until any event has occurred.
      *
      * @param tx_buffer The TX buffer with data to be transferred. If NULL is passed,
-     *                  the default SPI value is sent.
+     *                  the default %SPI value is sent.
      * @param tx_length The length of TX buffer in bytes.
      * @param rx_buffer The RX buffer which is used for received data. If NULL is passed,
      *                  received data are ignored.
@@ -448,9 +465,6 @@ public:
      * @param event     The logical OR of events to subscribe to. May be #SPI_EVENT_ALL, or some combination
      *                  of the flags #SPI_EVENT_ERROR, #SPI_EVENT_COMPLETE, or #SPI_EVENT_RX_OVERFLOW
      *
-     * \warning Be sure to call this function with pointer types matching the frame size set for the SPI bus,
-     * or undefined behavior may occur!
-     *
      * @return Operation result (integer)
      * @retval 0 If the transfer has started.
      * @retval -1 if the transfer could not be enqueued (increase drivers.spi_transaction_queue_len option)
@@ -459,14 +473,27 @@ public:
     typename std::enable_if<std::is_integral<WordT>::value, int>::type
     transfer(const WordT *tx_buffer, int tx_length, WordT *rx_buffer, int rx_length, const event_callback_t &callback, int event = SPI_EVENT_COMPLETE)
     {
-        if (spi_active(&_peripheral->spi)) {
-            return queue_transfer(tx_buffer, tx_length, rx_buffer, rx_length, sizeof(WordT) * 8, callback, event);
-        }
-        start_transfer(tx_buffer, tx_length, rx_buffer, rx_length, sizeof(WordT) * 8, callback, event);
-        return 0;
+        return transfer_internal(tx_buffer, tx_length, rx_buffer, rx_length, callback, event);
     }

-    /** Start %SPI transfer and wait until it is complete. Like the transactional API this blocks the current thread,
+    // Overloads of the above to support passing nullptr
+    template<typename WordT>
+    typename std::enable_if<std::is_integral<WordT>::value, int>::type
+    transfer(const std::nullptr_t *tx_buffer, int tx_length, WordT *rx_buffer, int rx_length, const event_callback_t &callback, int event = SPI_EVENT_COMPLETE)
+    {
+        return transfer_internal(tx_buffer, tx_length, rx_buffer, rx_length, callback, event);
+    }
+    template<typename WordT>
+    typename std::enable_if<std::is_integral<WordT>::value, int>::type
+    transfer(const WordT *tx_buffer, int tx_length, std::nullptr_t *rx_buffer, int rx_length, const event_callback_t &callback, int event = SPI_EVENT_COMPLETE)
+    {
+        return transfer_internal(tx_buffer, tx_length, rx_buffer, rx_length, callback, event);
+    }
+
+    /**
+     * @brief Start %SPI transfer and wait until it is complete.
+     *
+     * Like the transactional API this blocks the current thread,
      * however all work is done in the background and other threads may execute.
      *
      * As long as there is space, this function will enqueue the transfer request onto the peripheral,
      * and block until it is done.
      *
@@ -480,9 +507,6 @@ public:
      * @param rx_length The length of RX buffer in bytes If 0, no reception is done.
      * @param timeout   timeout value. Use #rtos::Kernel::wait_for_u32_forever to wait forever (the default).
      *
-     * \warning Be sure to call this function with pointer types matching the frame size set for the SPI bus,
-     * or undefined behavior may occur!
-     *
      * @return Operation result (integer)
      * @retval -1 if the transfer could not be enqueued (increase drivers.spi_transaction_queue_len option)
      * @retval 1 on timeout
      * @retval 2 on other error
      * @retval 0 on success
      */
@@ -493,53 +517,37 @@ public:
     typename std::enable_if<std::is_integral<WordT>::value, int>::type
     transfer_and_wait(const WordT *tx_buffer, int tx_length, WordT *rx_buffer, int rx_length, rtos::Kernel::Clock::duration_u32 timeout = rtos::Kernel::wait_for_u32_forever)
     {
-        // Use EventFlags to suspend the thread until the transfer finishes
-        rtos::EventFlags transferResultFlags("SPI::transfer_and_wait EvFlags");
-
-        // Simple callback from the transfer that sets the EventFlags using the I2C result event
-        event_callback_t transferCallback([&](int event) {
-            transferResultFlags.set(event);
-        });
-
-        int txRet = transfer(tx_buffer, tx_length, rx_buffer, rx_length, transferCallback, SPI_EVENT_ALL);
-        if (txRet != 0) {
-            return txRet;
-        }
-
-        // Wait until transfer complete, error, or timeout
-        uint32_t result = transferResultFlags.wait_any_for(SPI_EVENT_ALL, timeout);
-
-        if (result & osFlagsError) {
-            if (result == osFlagsErrorTimeout) {
-                // Timeout expired, cancel transfer.
-                abort_transfer();
-                return 1;
-            } else {
-                // Other event flags error. Transfer might be still running so cancel it.
-                abort_transfer();
-                return 2;
-            }
-        } else {
-            // Note: Cannot use a switch here because multiple flags might be set at the same time (possible
-            // in the STM32 HAL code at least).
-            if (result == SPI_EVENT_COMPLETE) {
-                return 0;
-            } else {
-                // SPI peripheral level error
-                return 2;
-            }
-        }
+        return transfer_and_wait_internal(tx_buffer, tx_length, rx_buffer, rx_length, timeout);
     }

-    /** Abort the on-going SPI transfer, and continue with transfers in the queue, if any.
+    // Overloads of the above to support passing nullptr
+    template<typename WordT>
+    typename std::enable_if<std::is_integral<WordT>::value, int>::type
+    transfer_and_wait(const std::nullptr_t *tx_buffer, int tx_length, WordT *rx_buffer, int rx_length, rtos::Kernel::Clock::duration_u32 timeout = rtos::Kernel::wait_for_u32_forever)
+    {
+        return transfer_and_wait_internal(tx_buffer, tx_length, rx_buffer, rx_length, timeout);
+    }
+    template<typename WordT>
+    typename std::enable_if<std::is_integral<WordT>::value, int>::type
+    transfer_and_wait(const WordT *tx_buffer, int tx_length, std::nullptr_t *rx_buffer, int rx_length, rtos::Kernel::Clock::duration_u32 timeout = rtos::Kernel::wait_for_u32_forever)
+    {
+        return transfer_and_wait_internal(tx_buffer, tx_length, rx_buffer, rx_length, timeout);
+    }
+
+    /**
+     * @brief Abort the on-going SPI transfer, if any, and continue with transfers in the queue, if any.
      */
     void abort_transfer();

-    /** Clear the queue of transfers.
+    /**
+     * @brief Clear the queue of transfers.
+     *
+     * If a transfer is currently active, it will continue until complete.
      */
     void clear_transfer_buffer();

-    /** Clear the queue of transfers and abort the on-going transfer.
+    /**
+     * @brief Clear the queue of transfers and abort any on-going transfer.
      */
     void abort_all_transfers();

@@ -568,7 +576,6 @@ protected:
      * @param rx_buffer The RX buffer which is used for received data. If NULL is passed,
      *                  received data are ignored.
      * @param rx_length The length of RX buffer in bytes.
-     * @param bit_width The buffers element width in bits.
      * @param callback  The event callback function.
      * @param event     The event mask of events to modify.
      *
@@ -576,7 +583,32 @@ protected:
      * @retval 0 A transfer was started or added to the queue.
      * @retval -1 Transfer can't be added because queue is full.
      */
-    int transfer(const void *tx_buffer, int tx_length, void *rx_buffer, int rx_length, unsigned char bit_width, const event_callback_t &callback, int event);
+    int transfer_internal(const void *tx_buffer, int tx_length, void *rx_buffer, int rx_length, const event_callback_t &callback, int event);
+
+    /**
+     * @brief Start %SPI transfer and wait until it is complete.
+     *
+     * Like the transactional API this blocks the current thread,
+     * however all work is done in the background and other threads may execute.
+     *
+     * As long as there is space, this function will enqueue the transfer request onto the peripheral,
+     * and block until it is done.
+     *
+     * Internally, the chip vendor may implement this function using either DMA or interrupts.
+     *
+     * @param tx_buffer The TX buffer with data to be transferred. May be nullptr if tx_length is 0.
+     * @param tx_length The length of TX buffer in bytes. If 0, no transmission is done.
+     * @param rx_buffer The RX buffer, which is used for received data. May be nullptr if rx_length is 0.
+     * @param rx_length The length of RX buffer in bytes. If 0, no reception is done.
+     * @param timeout   timeout value. Use #rtos::Kernel::wait_for_u32_forever to wait forever (the default).
+     *
+     * @return Operation result (integer)
+     * @retval -1 if the transfer could not be enqueued (increase drivers.spi_transaction_queue_len option)
+     * @retval 1 on timeout
+     * @retval 2 on other error
+     * @retval 0 on success
+     */
+    int transfer_and_wait_internal(const void *tx_buffer, int tx_length, void *rx_buffer, int rx_length, rtos::Kernel::Clock::duration_u32 timeout);

     /** Put a transfer on the transfer queue.
      *
@@ -656,8 +688,12 @@ protected:
         spi_t spi{};
         /* Used by lock and unlock for thread safety */
         SingletonPtr<rtos::Mutex> mutex;
-        /* Current user of the SPI */
+        /* Current user of the SPI, if any. */
         SPI *owner = nullptr;
+        /* Number of SPI objects that have been created which reference this peripheral. */
+        uint8_t numUsers = 0;
+        /* True iff anyone has ever called spi_init() / spi_init_direct() for this peripheral */
+        bool initialized = false;
 #if DEVICE_SPI_ASYNCH && MBED_CONF_DRIVERS_SPI_TRANSACTION_QUEUE_LEN
         /* Queue of pending transfers */
         SingletonPtr<CircularBuffer<Transaction<SPI>, MBED_CONF_DRIVERS_SPI_TRANSACTION_QUEUE_LEN> > transaction_buffer;
@@ -682,6 +718,12 @@ protected:
     DMAUsage _usage;
     /* Current sate of the sleep manager */
     bool _deep_sleep_locked;
+    /* Whether an async transfer is currently in progress. Specifically, this is true
+     * iff start_transfer() has been called and the chip has been selected but irq_handler_asynch()
+     * has NOT been called yet. */
+    volatile bool _transfer_in_progress = false;
+    /* Event flags used for transfer_and_wait() */
+    rtos::EventFlags _transfer_and_wait_flags;
 #endif // DEVICE_SPI_ASYNCH

     // Configuration.
@@ -695,14 +737,14 @@ protected:
     /* Size of the SPI frame */
     int _bits;
-    /* Clock polairy and phase */
+    /* Clock polarity and phase */
     int _mode;
     /* Clock frequency */
     int _hz;
     /* Default character used for NULL transfers */
     char _write_fill;
     /* Select count to handle re-entrant selection */
-    int8_t _select_count;
+    volatile uint8_t _select_count = 0;
     /* Static pinmap data */
     const spi_pinmap_t *_static_pinmap;
     /* SPI peripheral name */
@@ -711,6 +753,12 @@ protected:
     void (*_init_func)(SPI *);

 private:
+
+    /**
+     * Get a reference to the mutex used to protect the peripherals array.
+     */
+    rtos::Mutex &_get_peripherals_mutex();
+
     void _do_construct();

     /** Private acquire function without locking/unlocking.
@@ -720,15 +768,42 @@ private:
     void _set_ssel(int);

     /** Private lookup in the static _peripherals table.
+     * Should be called with _peripherals_mutex locked.
      */
     static spi_peripheral_s *_lookup(SPIName name);

+    /** Allocate an entry in the static _peripherals table.
+     * Should be called with _peripherals_mutex locked.
      */
     static spi_peripheral_s *_alloc();

+    /// Deallocate the given peripheral.
+    /// Should be called with _peripherals_mutex locked.
+    static void _dealloc(spi_peripheral_s *peripheral);
+
     static void _do_init(SPI *obj);
     static void _do_init_direct(SPI *obj);

+    /**
+     * @brief Write to the SPI Slave and obtain the response.
+     *
+     * The total number of bytes sent and received will be the maximum of
+     * tx_length and rx_length. The bytes written will be padded with the
+     * value 0xff.
+     *
+     * Note: Even if the word size / bits per frame is not 8, \c rx_length and \c tx_length
+     * still count bytes of input data, not numbers of words.
+     *
+     * @param tx_buffer Pointer to the byte-array of data to write to the device.
+     * @param tx_length Number of bytes to write, may be zero.
+     * @param rx_buffer Pointer to the byte-array of data to read from the device.
+     * @param rx_length Number of bytes to read, may be zero.
+     * @return
+     *      The number of bytes written and read from the device. This is
+     *      maximum of tx_length and rx_length.
+     */
+    virtual int write_internal(const void *tx_buffer, int tx_length, void *rx_buffer, int rx_length);
+
 #endif //!defined(DOXYGEN_ONLY)
 };

diff --git a/drivers/source/SPI.cpp b/drivers/source/SPI.cpp
index 896f944931..376ec7c708 100644
--- a/drivers/source/SPI.cpp
+++ b/drivers/source/SPI.cpp
@@ -16,6 +16,7 @@
  */
 #include "drivers/SPI.h"
 #include "platform/mbed_critical.h"
+#include "mbed_error.h"

 #if DEVICE_SPI_ASYNCH
 #include "platform/mbed_power_mgmt.h"
@@ -31,6 +32,7 @@ int SPI::_peripherals_used;
 SPI::SPI(PinName mosi, PinName miso, PinName sclk, PinName ssel) :
 #if DEVICE_SPI_ASYNCH
     _irq(this),
+    _transfer_and_wait_flags("SPI::transfer_and_wait() flags"),
 #endif
     _mosi(mosi),
     _miso(miso),
@@ -53,6 +55,7 @@ SPI::SPI(PinName mosi, PinName miso, PinName sclk, PinName ssel) :
 SPI::SPI(PinName mosi, PinName miso, PinName sclk, PinName ssel, use_gpio_ssel_t) :
 #if DEVICE_SPI_ASYNCH
     _irq(this),
+    _transfer_and_wait_flags("SPI::transfer_and_wait() flags"),
 #endif
     _mosi(mosi),
     _miso(miso),
@@ -74,6 +77,7 @@ SPI::SPI(PinName mosi, PinName miso, PinName sclk, PinName ssel, use_gpio_ssel_t
 SPI::SPI(const spi_pinmap_t &pinmap) :
 #if DEVICE_SPI_ASYNCH
     _irq(this),
+    _transfer_and_wait_flags("SPI::transfer_and_wait() flags"),
 #endif
     _mosi(pinmap.mosi_pin),
     _miso(pinmap.miso_pin),
@@ -91,6 +95,7 @@ SPI::SPI(const spi_pinmap_t &pinmap) :
 SPI::SPI(const spi_pinmap_t &pinmap, PinName ssel) :
 #if DEVICE_SPI_ASYNCH
     _irq(this),
+    _transfer_and_wait_flags("SPI::transfer_and_wait() flags"),
 #endif
     _mosi(pinmap.mosi_pin),
     _miso(pinmap.miso_pin),
@@ -106,14 +111,22 @@ SPI::SPI(const spi_pinmap_t &pinmap, PinName ssel) :

 void SPI::_do_init(SPI *obj)
 {
+    obj->_peripheral->initialized = true;
     spi_init(&obj->_peripheral->spi, obj->_mosi, obj->_miso, obj->_sclk, obj->_hw_ssel);
 }

 void SPI::_do_init_direct(SPI *obj)
 {
+    obj->_peripheral->initialized = true;
     spi_init_direct(&obj->_peripheral->spi, obj->_static_pinmap);
 }

+rtos::Mutex &SPI::_get_peripherals_mutex()
+{
+    static rtos::Mutex peripherals_mutex;
+    return peripherals_mutex;
+}
+
 void SPI::_do_construct()
 {
     // No lock needed in the constructor
@@ -127,15 +140,23 @@ void SPI::_do_construct()
     _hz = 1000000;
     _write_fill = SPI_FILL_CHAR;

-    core_util_critical_section_enter();
-    // lookup in a critical section if we already have it else initialize it
+    {
+        rtos::ScopedMutexLock lock(_get_peripherals_mutex());

-    _peripheral = SPI::_lookup(_peripheral_name);
-    if (!_peripheral) {
-        _peripheral = SPI::_alloc();
-        _peripheral->name = _peripheral_name;
+        // lookup and claim the peripheral with the mutex locked in case another thread is
+        // also trying to claim it
+        _peripheral = SPI::_lookup(_peripheral_name);
+        if (!_peripheral) {
+            _peripheral = SPI::_alloc();
+            _peripheral->name = _peripheral_name;
+        }
+
+        if (_peripheral->numUsers == std::numeric_limits<uint8_t>::max()) {
+            MBED_ERROR(MBED_MAKE_ERROR(MBED_MODULE_DRIVER_SPI, MBED_ERROR_CODE_MUTEX_LOCK_FAILED), "Ref count at max!");
+        }
+
+        _peripheral->numUsers++;
     }
-    core_util_critical_section_exit();

 #if DEVICE_SPI_ASYNCH && MBED_CONF_DRIVERS_SPI_TRANSACTION_QUEUE_LEN
     // prime the SingletonPtr, so we don't have a problem trying to
@@ -148,32 +169,58 @@ void SPI::_do_construct()

 SPI::~SPI()
 {
-    SPI::lock();
-    /* Make sure a stale pointer isn't left in peripheral's owner field */
-    if (_peripheral->owner == this) {
-        _peripheral->owner = nullptr;
+    if (_peripheral->numUsers == 0) {
+        MBED_ERROR(MBED_MAKE_ERROR(MBED_MODULE_DRIVER_SPI, MBED_ERROR_CODE_MUTEX_UNLOCK_FAILED), "Ref count at 0?");
+    }
+
+    {
+        rtos::ScopedMutexLock lock(_get_peripherals_mutex());
+
+        /* Make sure a stale pointer isn't left in peripheral's owner field */
+        if (_peripheral->owner == this) {
+            _peripheral->owner = nullptr;
+        }
+
+        if (--_peripheral->numUsers == 0) {
+            _dealloc(_peripheral);
+        }
     }
-    SPI::unlock();
 }

 SPI::spi_peripheral_s *SPI::_lookup(SPI::SPIName name)
 {
     SPI::spi_peripheral_s *result = nullptr;
-    core_util_critical_section_enter();
     for (int idx = 0; idx < _peripherals_used; idx++) {
-        if (_peripherals[idx].name == name) {
+        if (_peripherals[idx].numUsers > 0 && _peripherals[idx].name == name) {
             result = &_peripherals[idx];
             break;
         }
     }
-    core_util_critical_section_exit();
     return result;
 }

 SPI::spi_peripheral_s *SPI::_alloc()
 {
     MBED_ASSERT(_peripherals_used < SPI_PERIPHERALS_USED);
-    return &_peripherals[_peripherals_used++];
+
+    // Find an unused peripheral to return
+    for (spi_peripheral_s &peripheral : _peripherals) {
+        if (peripheral.numUsers == 0) {
+            _peripherals_used++;
+            return &peripheral;
+        }
+    }
+
+    MBED_ERROR(MBED_MAKE_ERROR(MBED_MODULE_DRIVER_SPI, MBED_ERROR_CODE_INVALID_DATA_DETECTED), "Can't find new peripheral!");
+}
+
+void SPI::_dealloc(SPI::spi_peripheral_s *peripheral)
+{
+    if (peripheral->initialized) {
+        spi_free(&peripheral->spi);
+        peripheral->initialized = false;
+    }
+    --_peripherals_used;
 }

 void SPI::format(int bits, int mode)
@@ -226,10 +273,13 @@ int SPI::write(int value)
     return ret;
 }

-int SPI::write(const char *tx_buffer, int tx_length, char *rx_buffer, int rx_length)
+int SPI::write_internal(const void *tx_buffer, int tx_length, void *rx_buffer, int rx_length)
 {
     select();
-    int ret = spi_master_block_write(&_peripheral->spi, tx_buffer, tx_length, rx_buffer, rx_length, _write_fill);
+    int ret = spi_master_block_write(&_peripheral->spi,
+                                     reinterpret_cast<const char *>(tx_buffer), tx_length,
+                                     reinterpret_cast<char *>(rx_buffer), rx_length,
+                                     _write_fill);
     deselect();
     return ret;
 }
@@ -278,22 +328,78 @@ void SPI::set_default_write_value(char data)

 #if DEVICE_SPI_ASYNCH

-int SPI::transfer(const void *tx_buffer, int tx_length, void *rx_buffer, int rx_length, unsigned char bit_width, const event_callback_t &callback, int event)
+int SPI::transfer_internal(const void *tx_buffer, int tx_length, void *rx_buffer, int rx_length, const event_callback_t &callback, int event)
 {
     if (spi_active(&_peripheral->spi)) {
-        return queue_transfer(tx_buffer, tx_length, rx_buffer, rx_length, bit_width, callback, event);
+        return queue_transfer(tx_buffer, tx_length, rx_buffer, rx_length, _bits, callback, event);
     }
-    start_transfer(tx_buffer, tx_length, rx_buffer, rx_length, bit_width, callback, event);
+    start_transfer(tx_buffer, tx_length, rx_buffer, rx_length, _bits, callback, event);
     return 0;
 }

+int SPI::transfer_and_wait_internal(const void *tx_buffer, int tx_length, void *rx_buffer, int rx_length, rtos::Kernel::Clock::duration_u32 timeout)
+{
+    // Simple callback from the transfer that sets the EventFlags using the SPI result event
+    event_callback_t transferCallback([&](int event) {
+        _transfer_and_wait_flags.set(event);
+    });
+
+    int txRet = transfer_internal(tx_buffer, tx_length, rx_buffer, rx_length, transferCallback, SPI_EVENT_ALL);
+    if (txRet != 0) {
+        return txRet;
+    }
+
+    // Wait until transfer complete, error, or timeout
+    uint32_t result = _transfer_and_wait_flags.wait_any_for(SPI_EVENT_ALL, timeout);
+
+    if (result & osFlagsError) {
+        if (result == osFlagsErrorTimeout) {
+            // Timeout expired, cancel transfer.
+            abort_transfer();
+            return 1;
+        } else {
+            // Other event flags error. Transfer might be still running so cancel it.
+            abort_transfer();
+            return 2;
+        }
+    } else {
+        // Note: Cannot use a switch here because multiple flags might be set at the same time (possible
+        // in the STM32 HAL code at least).
+        if (result == SPI_EVENT_COMPLETE) {
+            return 0;
+        } else {
+            // SPI peripheral level error
+            return 2;
+        }
+    }
+}
+
+
 void SPI::abort_transfer()
 {
-    spi_abort_asynch(&_peripheral->spi);
-    unlock_deep_sleep();
+    // There is a potential for race condition here which we need to be aware of.
+    // There may or may not be a transfer actually running when we enter this function.
+    // To work through this, if there is a transfer in progress, we use spi_abort_asynch
+    // which disables the transfer interrupt.
+    // Then, we check _transfer_in_progress again. If it is false, then it means the ISR
+    // fired during the call to spi_abort_asynch, so the transfer has already completed normally.
+
+    if (_transfer_in_progress) {
+        spi_abort_asynch(&_peripheral->spi);
+    }
+
+    if (_transfer_in_progress) {
+        // End-of-transfer ISR never fired, clean up.
+        unlock_deep_sleep();
+
+        if (--_select_count == 0) {
+            _set_ssel(1);
+        }
+
 #if MBED_CONF_DRIVERS_SPI_TRANSACTION_QUEUE_LEN
-    dequeue_transaction();
+        dequeue_transaction();
 #endif
+    }
 }

 void SPI::clear_transfer_buffer()
@@ -350,10 +456,17 @@ int SPI::queue_transfer(const void *tx_buffer, int tx_length, void *rx_buffer, i
 void SPI::start_transfer(const void *tx_buffer, int tx_length, void *rx_buffer, int rx_length, unsigned char bit_width, const event_callback_t &callback, int event)
 {
     lock_deep_sleep();
-    _acquire();
-    _set_ssel(0);
+
+    // Acquire the hardware and (if using GPIO CS mode) select the chip.
+    // But, if the user has already called select(), we can skip this step.
+    if (_select_count++ == 0) {
+        _acquire();
+        _set_ssel(0);
+    }
+
     _callback = callback;
     _irq.callback(&SPI::irq_handler_asynch);
+    _transfer_in_progress = true;
     spi_master_transfer(&_peripheral->spi, tx_buffer, tx_length, rx_buffer, rx_length, bit_width, _irq.entry(), event, _usage);
 }

@@ -396,7 +509,14 @@ void SPI::irq_handler_asynch(void)
 {
     int event = spi_irq_handler_asynch(&_peripheral->spi);
     if (_callback && (event & SPI_EVENT_ALL)) {
-        _set_ssel(1);
+        // If using GPIO CS mode, unless we were asked to keep the peripheral selected, deselect it.
+        // If there's another transfer queued, we *do* want to deselect the peripheral now.
+        // It will be reselected in start_transfer() which is called by dequeue_transaction() below.
+        if (--_select_count == 0) {
+            _set_ssel(1);
+        }
+        _transfer_in_progress = false;
+
         unlock_deep_sleep();
         _callback.call(event & SPI_EVENT_ALL);
     }

diff --git a/hal/include/hal/spi_api.h b/hal/include/hal/spi_api.h
index c0ca61729a..5f78f9b7b0 100644
--- a/hal/include/hal/spi_api.h
+++ b/hal/include/hal/spi_api.h
@@ -213,9 +213,6 @@ void spi_init_direct(spi_t *obj, const spi_pinmap_t *pinmap);
 void spi_init(spi_t *obj, PinName mosi, PinName miso, PinName sclk, PinName ssel);

 /** Release a SPI object
- *
- * TODO: spi_free is currently unimplemented
- * This will require reference counting at the C++ level to be safe
  *
  * Return the pins owned by the SPI object to their reset state
  * Disable the SPI peripheral
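For reviewers who want to exercise the reworked API end to end, here is a minimal usage sketch of the blocking transfer_and_wait() call, the callback-based transfer(), the new nullptr buffer overloads, and select()/deselect() composition. The pins and command bytes are placeholders, not part of this patch:

    #include "mbed.h"

    SPI spi(PA_7, PA_6, PA_5, PA_4, use_gpio_ssel);   // hypothetical pins

    EventFlags done;
    void on_transfer(int event)                       // called from the SPI IRQ/event context
    {
        done.set(event);
    }

    int main()
    {
        const uint8_t cmd[4] = {0x0B, 0x00, 0x00, 0x00};
        uint8_t resp[4]{};

        // Blocking wrapper around the async machinery: this thread sleeps, others keep running.
        int result = spi.transfer_and_wait(cmd, sizeof(cmd), resp, sizeof(resp), 100ms);
        if (result != 0) {
            printf("transfer_and_wait failed: %d\n", result);   // 1 = timeout, 2 = error, -1 = queue full
        }

        // Fully non-blocking variant with a completion callback.
        spi.transfer(cmd, sizeof(cmd), resp, sizeof(resp), callback(on_transfer), SPI_EVENT_COMPLETE);
        done.wait_any(SPI_EVENT_COMPLETE);

        // Manual chip-select composition: CS stays low across both writes.
        spi.select();
        spi.write(0x06);                              // single-frame write
        spi.write(cmd, sizeof(cmd), nullptr, 0);      // nullptr RX buffer uses the new overloads
        spi.deselect();
    }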