mirror of https://github.com/ARMmbed/mbed-os.git
Pull in lwip-eth updates from mbedmicro/mbed
Sync the directory lwip-eth with that of mbedmicro/mbed at revision b32f7a9aaf.

parent ea432a8c3a
commit eb76d0ebd6
@@ -28,53 +28,57 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "fsl_port_hal.h"
-#include "fsl_clock_manager.h"
-#include "fsl_device_registers.h"
-#include "fsl_sim_hal.h"
+#include "fsl_port.h"
 
 /*******************************************************************************
  * Code
  ******************************************************************************/
 void k64f_init_eth_hardware(void)
 {
-  uint8_t count;
+  port_pin_config_t configENET = {0};
 
-  /* Disable the mpu*/
-  BW_MPU_CESR_VLD(MPU_BASE, 0);
+  /* Disable MPU. */
+  MPU->CESR &= ~MPU_CESR_VLD_MASK;
 
-  /* Open POTR clock gate*/
-  for (count = 0; count < HW_PORT_INSTANCE_COUNT; count++)
-  {
-    CLOCK_SYS_EnablePortClock(count);
-  }
+  CLOCK_EnableClock(kCLOCK_PortC);
+  /* Affects PORTC_PCR16 register */
+  PORT_SetPinMux(PORTC, 16u, kPORT_MuxAlt4);
+  /* Affects PORTC_PCR17 register */
+  PORT_SetPinMux(PORTC, 17u, kPORT_MuxAlt4);
+  /* Affects PORTC_PCR18 register */
+  PORT_SetPinMux(PORTC, 18u, kPORT_MuxAlt4);
+  /* Affects PORTC_PCR19 register */
+  PORT_SetPinMux(PORTC, 19u, kPORT_MuxAlt4);
+  /* Affects PORTB_PCR1 register */
+  PORT_SetPinMux(PORTB, 1u, kPORT_MuxAlt4);
 
-  /* Configure gpio*/
-  PORT_HAL_SetMuxMode(PORTA_BASE, 12, kPortMuxAlt4); /*!< ENET RMII0_RXD1/MII0_RXD1*/
-  PORT_HAL_SetMuxMode(PORTA_BASE, 13, kPortMuxAlt4); /*!< ENET RMII0_RXD0/MII0_RXD0*/
-  PORT_HAL_SetMuxMode(PORTA_BASE, 14, kPortMuxAlt4); /*!< ENET RMII0_CRS_DV/MII0_RXDV*/
-  PORT_HAL_SetMuxMode(PORTA_BASE, 15, kPortMuxAlt4); /*!< ENET RMII0_TXEN/MII0_TXEN*/
-  PORT_HAL_SetMuxMode(PORTA_BASE, 16, kPortMuxAlt4); /*!< ENET RMII0_TXD0/MII0_TXD0*/
-  PORT_HAL_SetMuxMode(PORTA_BASE, 17, kPortMuxAlt4); /*!< ENET RMII0_TXD01/MII0_TXD1*/
-  PORT_HAL_SetMuxMode(PORTB_BASE, 0, kPortMuxAlt4); /*!< ENET RMII0_MDIO/MII0_MDIO*/
-  PORT_HAL_SetOpenDrainCmd(PORTB_BASE,0, true); /*!< ENET RMII0_MDC/MII0_MDC*/
+  configENET.openDrainEnable = kPORT_OpenDrainEnable;
+  configENET.mux = kPORT_MuxAlt4;
+  configENET.pullSelect = kPORT_PullUp;
+  /* Ungate the port clock */
+  CLOCK_EnableClock(kCLOCK_PortA);
+  /* Affects PORTB_PCR0 register */
+  PORT_SetPinConfig(PORTB, 0u, &configENET);
 
-  // Added for FRDM-K64F
-  PORT_HAL_SetPullMode(PORTB_BASE, 0, kPortPullUp);
-  PORT_HAL_SetPullCmd(PORTB_BASE, 0, true);
+  /* Affects PORTA_PCR13 register */
+  PORT_SetPinMux(PORTA, 13u, kPORT_MuxAlt4);
+  /* Affects PORTA_PCR12 register */
+  PORT_SetPinMux(PORTA, 12u, kPORT_MuxAlt4);
+  /* Affects PORTA_PCR14 register */
+  PORT_SetPinMux(PORTA, 14u, kPORT_MuxAlt4);
+  /* Affects PORTA_PCR5 register */
+  PORT_SetPinMux(PORTA, 5u, kPORT_MuxAlt4);
+  /* Affects PORTA_PCR16 register */
+  PORT_SetPinMux(PORTA, 16u, kPORT_MuxAlt4);
+  /* Affects PORTA_PCR17 register */
+  PORT_SetPinMux(PORTA, 17u, kPORT_MuxAlt4);
+  /* Affects PORTA_PCR15 register */
+  PORT_SetPinMux(PORTA, 15u, kPORT_MuxAlt4);
+  /* Affects PORTA_PCR28 register */
+  PORT_SetPinMux(PORTA, 28u, kPORT_MuxAlt4);
 
-  PORT_HAL_SetMuxMode(PORTB_BASE, 1, kPortMuxAlt4);
-
-#if FSL_FEATURE_ENET_SUPPORT_PTP
-  PORT_HAL_SetMuxMode(PORTC_BASE, (16 + ENET_TIMER_CHANNEL_NUM), kPortMuxAlt4); /* ENET ENET0_1588_TMR0*/
-  PORT_HAL_SetDriveStrengthMode(PORTC_BASE, (16 + ENET_TIMER_CHANNEL_NUM), kPortHighDriveStrength);
-#endif
-
-  /* Open ENET clock gate*/
-  CLOCK_SYS_EnableEnetClock( 0U);
-
-  /* Select the ptp timer outclk*/
-  CLOCK_HAL_SetSource(g_simBaseAddr[0], kClockTimeSrc, 2);
+  /* Select the Ethernet timestamp clock source */
+  CLOCK_SetEnetTime0Clock(0x2);
 }
 
 /*******************************************************************************
@@ -12,11 +12,7 @@
 #include "eth_arch.h"
 #include "sys_arch.h"
 
-#include "fsl_enet_driver.h"
-#include "fsl_enet_hal.h"
-#include "fsl_device_registers.h"
-#include "fsl_phy_driver.h"
-#include "fsl_interrupt_manager.h"
+#include "fsl_phy.h"
 #include "k64f_emac_config.h"
 #include <ctype.h>
 #include <stdio.h>
@@ -25,14 +21,23 @@
 
 #include "mbed_interface.h"
 
-extern IRQn_Type enet_irq_ids[HW_ENET_INSTANCE_COUNT][FSL_FEATURE_ENET_INTERRUPT_COUNT];
-extern uint8_t enetIntMap[kEnetIntNum];
-extern void *enetIfHandle;
+enet_handle_t g_handle;
+// TX Buffer descriptors
+uint8_t *tx_desc_start_addr;
+// RX Buffer descriptors
+uint8_t *rx_desc_start_addr;
+// RX packet buffer pointers
+struct pbuf *rx_buff[ENET_RX_RING_LEN];
+// TX packet buffer pointers
+struct pbuf *tx_buff[ENET_RX_RING_LEN];
+// RX packet payload pointers
+uint32_t *rx_ptr[ENET_RX_RING_LEN];
 
 /********************************************************************************
  * Internal data
 ********************************************************************************/
+#define ENET_BuffSizeAlign(n) ENET_ALIGN(n, ENET_BUFF_ALIGNMENT)
+#define ENET_ALIGN(x,align) ((unsigned int)((x) + ((align)-1)) & (unsigned int)(~(unsigned int)((align)- 1)))
 extern void k64f_init_eth_hardware(void);
 
 /* K64F EMAC driver data structure */
@@ -42,49 +47,11 @@ struct k64f_enetdata {
   sys_sem_t TxCleanSem; /**< TX cleanup thread wakeup semaphore */
   sys_mutex_t TXLockMutex; /**< TX critical section mutex */
   sys_sem_t xTXDCountSem; /**< TX free buffer counting semaphore */
-  volatile u32_t rx_free_descs; /**< Count of free RX descriptors */
-  struct pbuf *rxb[ENET_RX_RING_LEN]; /**< RX pbuf pointer list, zero-copy mode */
-  uint8_t *rx_desc_start_addr; /**< RX descriptor start address */
-  uint8_t *tx_desc_start_addr; /**< TX descriptor start address */
   uint8_t tx_consume_index, tx_produce_index; /**< TX buffers ring */
-  uint8_t rx_fill_index; /**< RX ring fill index */
-  struct pbuf *txb[ENET_TX_RING_LEN]; /**< TX pbuf pointer list, zero-copy mode */
-  void *txb_aligned[ENET_TX_RING_LEN]; /**< TX aligned buffers (if needed) */
 };
 
 static struct k64f_enetdata k64f_enetdata;
 
-static enet_dev_if_t enetDevIf[HW_ENET_INSTANCE_COUNT];
-static enet_mac_config_t g_enetMacCfg[HW_ENET_INSTANCE_COUNT] =
-{
-  {
-    ENET_ETH_MAX_FLEN , /*!< enet receive buffer size*/
-    ENET_RX_LARGE_BUFFER_NUM, /*!< enet large receive buffer number*/
-    ENET_RX_RING_LEN, /*!< enet receive bd number*/
-    ENET_TX_RING_LEN, /*!< enet transmit bd number*/
-    {0}, /*!< enet mac address*/
-    kEnetCfgRmii, /*!< enet rmii interface*/
-    kEnetCfgSpeed100M, /*!< enet rmii 100M*/
-    kEnetCfgFullDuplex, /*!< enet rmii Full- duplex*/
-    /*!< enet mac control flag recommended to use enet_mac_control_flag_t
-      we send frame with crc so receive crc forward for data length check test*/
-    kEnetRxCrcFwdEnable | kEnetRxFlowControlEnable,
-    true, /*!< enet txaccelerator enabled*/
-    true, /*!< enet rxaccelerator enabled*/
-    false, /*!< enet store and forward*/
-    {false, false, true, false, true}, /*!< enet rxaccelerator config*/
-    {false, false, true}, /*!< enet txaccelerator config*/
-    true, /*!< vlan frame support*/
-    true, /*!< phy auto discover*/
-    ENET_MII_CLOCK, /*!< enet MDC clock*/
-  },
-};
-
-static enet_phy_config_t g_enetPhyCfg[HW_ENET_INSTANCE_COUNT] =
-{
-  {0, false}
-};
-
 /** \brief Driver transmit and receive thread priorities
  *
  * Thread priorities for receive thread and TX cleanup thread. Alter
@ -95,166 +62,35 @@ static enet_phy_config_t g_enetPhyCfg[HW_ENET_INSTANCE_COUNT] =
|
||||||
#define TX_PRIORITY (osPriorityNormal)
|
#define TX_PRIORITY (osPriorityNormal)
|
||||||
#define PHY_PRIORITY (osPriorityNormal)
|
#define PHY_PRIORITY (osPriorityNormal)
|
||||||
|
|
||||||
/** \brief Debug output formatter lock define
|
|
||||||
*
|
|
||||||
* When using FreeRTOS and with LWIP_DEBUG enabled, enabling this
|
|
||||||
* define will allow RX debug messages to not interleave with the
|
|
||||||
* TX messages (so they are actually readable). Not enabling this
|
|
||||||
* define when the system is under load will cause the output to
|
|
||||||
* be unreadable. There is a small tradeoff in performance for this
|
|
||||||
* so use it only for debug. */
|
|
||||||
//#define LOCK_RX_THREAD
|
|
||||||
|
|
||||||
/** \brief Signal used for ethernet ISR to signal packet_rx() thread.
|
|
||||||
*/
|
|
||||||
#define RX_SIGNAL 1
|
|
||||||
|
|
||||||
// K64F-specific macros
|
|
||||||
#define RX_PBUF_AUTO_INDEX (-1)
|
|
||||||
|
|
||||||
/********************************************************************************
|
/********************************************************************************
|
||||||
* Buffer management
|
* Buffer management
|
||||||
********************************************************************************/
|
********************************************************************************/
|
||||||
|
/*
|
||||||
/** \brief Queues a pbuf into the RX descriptor list
|
* This function will queue a new receive buffer
|
||||||
*
|
|
||||||
* \param[in] k64f_enet Pointer to the drvier data structure
|
|
||||||
* \param[in] p Pointer to pbuf to queue
|
|
||||||
* \param[in] bidx Index to queue into
|
|
||||||
*/
|
*/
|
||||||
static void k64f_rxqueue_pbuf(struct k64f_enetdata *k64f_enet, struct pbuf *p, int bidx)
|
static void update_read_buffer(uint8_t *buf)
|
||||||
{
|
{
|
||||||
enet_bd_struct_t *start = (enet_bd_struct_t *)k64f_enet->rx_desc_start_addr;
|
if (buf != NULL) {
|
||||||
int idx;
|
g_handle.rxBdCurrent->buffer = buf;
|
||||||
|
|
||||||
/* Get next free descriptor index */
|
|
||||||
if (bidx == RX_PBUF_AUTO_INDEX)
|
|
||||||
idx = k64f_enet->rx_fill_index;
|
|
||||||
else
|
|
||||||
idx = bidx;
|
|
||||||
|
|
||||||
/* Setup descriptor and clear statuses */
|
|
||||||
enet_hal_init_rxbds(start + idx, (uint8_t*)p->payload, idx == ENET_RX_RING_LEN - 1);
|
|
||||||
|
|
||||||
/* Save pbuf pointer for push to network layer later */
|
|
||||||
k64f_enet->rxb[idx] = p;
|
|
||||||
|
|
||||||
/* Wrap at end of descriptor list */
|
|
||||||
idx = (idx + 1) % ENET_RX_RING_LEN;
|
|
||||||
|
|
||||||
/* Queue descriptor(s) */
|
|
||||||
k64f_enet->rx_free_descs -= 1;
|
|
||||||
|
|
||||||
if (bidx == RX_PBUF_AUTO_INDEX)
|
|
||||||
k64f_enet->rx_fill_index = idx;
|
|
||||||
|
|
||||||
enet_hal_active_rxbd(BOARD_DEBUG_ENET_INSTANCE_ADDR);
|
|
||||||
|
|
||||||
LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
|
|
||||||
("k64f_rxqueue_pbuf: pbuf packet queued: %p (free desc=%d)\n", p,
|
|
||||||
k64f_enet->rx_free_descs));
|
|
||||||
}
|
|
||||||
|
|
||||||
/** \brief Attempt to allocate and requeue a new pbuf for RX
|
|
||||||
*
|
|
||||||
* \param[in] netif Pointer to the netif structure
|
|
||||||
* \returns number of queued packets
|
|
||||||
*/
|
|
||||||
s32_t k64f_rx_queue(struct netif *netif, int idx)
|
|
||||||
{
|
|
||||||
struct k64f_enetdata *k64f_enet = netif->state;
|
|
||||||
enet_dev_if_t *enetIfPtr = (enet_dev_if_t *)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];
|
|
||||||
struct pbuf *p;
|
|
||||||
int queued = 0;
|
|
||||||
|
|
||||||
/* Attempt to requeue as many packets as possible */
|
|
||||||
while (k64f_enet->rx_free_descs > 0) {
|
|
||||||
/* Allocate a pbuf from the pool. We need to allocate at the
|
|
||||||
maximum size as we don't know the size of the yet to be
|
|
||||||
received packet. */
|
|
||||||
p = pbuf_alloc(PBUF_RAW, enetIfPtr->macCfgPtr->rxBufferSize + RX_BUF_ALIGNMENT, PBUF_RAM);
|
|
||||||
if (p == NULL) {
|
|
||||||
LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
|
|
||||||
("k64_rx_queue: could not allocate RX pbuf (free desc=%d)\n",
|
|
||||||
k64f_enet->rx_free_descs));
|
|
||||||
return queued;
|
|
||||||
}
|
}
|
||||||
/* K64F note: the next line ensures that the RX buffer is properly aligned for the K64F
|
|
||||||
RX descriptors (16 bytes alignment). However, by doing so, we're effectively changing
|
|
||||||
a data structure which is internal to lwIP. This might not prove to be a good idea
|
|
||||||
in the long run, but a better fix would probably involve modifying lwIP itself */
|
|
||||||
p->payload = (void*)ENET_ALIGN((uint32_t)p->payload, RX_BUF_ALIGNMENT);
|
|
||||||
|
|
||||||
/* pbufs allocated from the RAM pool should be non-chained. */
|
/* Clears status. */
|
||||||
LWIP_ASSERT("k64f_rx_queue: pbuf is not contiguous (chained)", pbuf_clen(p) <= 1);
|
g_handle.rxBdCurrent->control &= ENET_BUFFDESCRIPTOR_RX_WRAP_MASK;
|
||||||
|
|
||||||
/* Queue packet */
|
/* Sets the receive buffer descriptor with the empty flag. */
|
||||||
k64f_rxqueue_pbuf(k64f_enet, p, idx);
|
g_handle.rxBdCurrent->control |= ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK;
|
||||||
queued++;
|
|
||||||
}
|
|
||||||
|
|
||||||
return queued;
|
/* Increases the buffer descriptor to the next one. */
|
||||||
}
|
if (g_handle.rxBdCurrent->control & ENET_BUFFDESCRIPTOR_RX_WRAP_MASK) {
|
||||||
|
g_handle.rxBdCurrent = g_handle.rxBdBase;
|
||||||
|
g_handle.rxBdDirty = g_handle.rxBdBase;
|
||||||
|
} else {
|
||||||
|
g_handle.rxBdCurrent++;
|
||||||
|
g_handle.rxBdDirty++;
|
||||||
|
}
|
||||||
|
|
||||||
/** \brief Sets up the RX descriptor ring buffers.
|
/* Actives the receive buffer descriptor. */
|
||||||
*
|
ENET->RDAR = ENET_RDAR_RDAR_MASK;
|
||||||
* This function sets up the descriptor list used for receive packets.
|
|
||||||
*
|
|
||||||
* \param[in] netif Pointer to driver data structure
|
|
||||||
* \returns ERR_MEM if out of memory, ERR_OK otherwise
|
|
||||||
*/
|
|
||||||
static err_t k64f_rx_setup(struct netif *netif, enet_rxbd_config_t *rxbdCfg) {
|
|
||||||
struct k64f_enetdata *k64f_enet = netif->state;
|
|
||||||
enet_dev_if_t *enetIfPtr = (enet_dev_if_t *)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];
|
|
||||||
uint8_t *rxBdPtr;
|
|
||||||
uint32_t rxBufferSizeAligned;
|
|
||||||
|
|
||||||
// Allocate RX descriptors
|
|
||||||
rxBdPtr = (uint8_t *)calloc(1, enet_hal_get_bd_size() * enetIfPtr->macCfgPtr->rxBdNumber + ENET_BD_ALIGNMENT);
|
|
||||||
if(!rxBdPtr)
|
|
||||||
return ERR_MEM;
|
|
||||||
k64f_enet->rx_desc_start_addr = (uint8_t *)ENET_ALIGN((uint32_t)rxBdPtr, ENET_BD_ALIGNMENT);
|
|
||||||
k64f_enet->rx_free_descs = enetIfPtr->macCfgPtr->rxBdNumber;
|
|
||||||
k64f_enet->rx_fill_index = 0;
|
|
||||||
|
|
||||||
rxBufferSizeAligned = ENET_ALIGN(enetIfPtr->macCfgPtr->rxBufferSize, ENET_RX_BUFFER_ALIGNMENT);
|
|
||||||
enetIfPtr->macContextPtr->rxBufferSizeAligned = rxBufferSizeAligned;
|
|
||||||
rxbdCfg->rxBdPtrAlign = k64f_enet->rx_desc_start_addr;
|
|
||||||
rxbdCfg->rxBdNum = enetIfPtr->macCfgPtr->rxBdNumber;
|
|
||||||
rxbdCfg->rxBufferNum = enetIfPtr->macCfgPtr->rxBdNumber;
|
|
||||||
|
|
||||||
k64f_rx_queue(netif, RX_PBUF_AUTO_INDEX);
|
|
||||||
return ERR_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
/** \brief Sets up the TX descriptor ring buffers.
|
|
||||||
*
|
|
||||||
* This function sets up the descriptor list used for transmit packets.
|
|
||||||
*
|
|
||||||
* \param[in] netif Pointer to driver data structure
|
|
||||||
* \returns ERR_MEM if out of memory, ERR_OK otherwise
|
|
||||||
*/
|
|
||||||
static err_t k64f_tx_setup(struct netif *netif, enet_txbd_config_t *txbdCfg) {
|
|
||||||
struct k64f_enetdata *k64f_enet = netif->state;
|
|
||||||
enet_dev_if_t *enetIfPtr = (enet_dev_if_t *)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];
|
|
||||||
uint8_t *txBdPtr;
|
|
||||||
|
|
||||||
// Allocate TX descriptors
|
|
||||||
txBdPtr = (uint8_t *)calloc(1, enet_hal_get_bd_size() * enetIfPtr->macCfgPtr->txBdNumber + ENET_BD_ALIGNMENT);
|
|
||||||
if(!txBdPtr)
|
|
||||||
return ERR_MEM;
|
|
||||||
|
|
||||||
k64f_enet->tx_desc_start_addr = (uint8_t *)ENET_ALIGN((uint32_t)txBdPtr, ENET_BD_ALIGNMENT);
|
|
||||||
k64f_enet->tx_consume_index = k64f_enet->tx_produce_index = 0;
|
|
||||||
|
|
||||||
txbdCfg->txBdPtrAlign = k64f_enet->tx_desc_start_addr;
|
|
||||||
txbdCfg->txBufferNum = enetIfPtr->macCfgPtr->txBdNumber;
|
|
||||||
txbdCfg->txBufferSizeAlign = ENET_ALIGN(enetIfPtr->maxFrameSize, ENET_TX_BUFFER_ALIGNMENT);
|
|
||||||
|
|
||||||
// Make the TX descriptor ring circular
|
|
||||||
enet_hal_init_txbds(k64f_enet->tx_desc_start_addr + enet_hal_get_bd_size() * (ENET_TX_RING_LEN - 1), 1);
|
|
||||||
|
|
||||||
return ERR_OK;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/** \brief Free TX buffers that are complete
|
/** \brief Free TX buffers that are complete
|
||||||
|
@ -263,125 +99,141 @@ static err_t k64f_tx_setup(struct netif *netif, enet_txbd_config_t *txbdCfg) {
|
||||||
*/
|
*/
|
||||||
static void k64f_tx_reclaim(struct k64f_enetdata *k64f_enet)
|
static void k64f_tx_reclaim(struct k64f_enetdata *k64f_enet)
|
||||||
{
|
{
|
||||||
uint8_t i;
|
uint8_t i = 0 ;
|
||||||
volatile enet_bd_struct_t * bdPtr = (enet_bd_struct_t *)k64f_enet->tx_desc_start_addr;
|
|
||||||
|
|
||||||
/* Get exclusive access */
|
/* Get exclusive access */
|
||||||
sys_mutex_lock(&k64f_enet->TXLockMutex);
|
sys_mutex_lock(&k64f_enet->TXLockMutex);
|
||||||
|
|
||||||
// Traverse all descriptors, looking for the ones modified by the uDMA
|
|
||||||
i = k64f_enet->tx_consume_index;
|
i = k64f_enet->tx_consume_index;
|
||||||
while(i != k64f_enet->tx_produce_index && !(bdPtr[i].control & kEnetTxBdReady)) {
|
// Traverse all descriptors, looking for the ones modified by the uDMA
|
||||||
if (k64f_enet->txb_aligned[i]) {
|
while((i != k64f_enet->tx_produce_index) && (!(g_handle.txBdDirty->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK))) {
|
||||||
free(k64f_enet->txb_aligned[i]);
|
pbuf_free(tx_buff[i]);
|
||||||
k64f_enet->txb_aligned[i] = NULL;
|
if (g_handle.txBdDirty->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
|
||||||
} else if (k64f_enet->txb[i]) {
|
g_handle.txBdDirty = g_handle.txBdBase;
|
||||||
pbuf_free(k64f_enet->txb[i]);
|
else
|
||||||
k64f_enet->txb[i] = NULL;
|
g_handle.txBdDirty++;
|
||||||
}
|
|
||||||
osSemaphoreRelease(k64f_enet->xTXDCountSem.id);
|
|
||||||
bdPtr[i].controlExtend2 &= ~TX_DESC_UPDATED_MASK;
|
|
||||||
i = (i + 1) % ENET_TX_RING_LEN;
|
i = (i + 1) % ENET_TX_RING_LEN;
|
||||||
}
|
}
|
||||||
k64f_enet->tx_consume_index = i;
|
|
||||||
|
|
||||||
|
k64f_enet->tx_consume_index = i;
|
||||||
/* Restore access */
|
/* Restore access */
|
||||||
sys_mutex_unlock(&k64f_enet->TXLockMutex);
|
sys_mutex_unlock(&k64f_enet->TXLockMutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** \brief Ethernet receive interrupt handler
|
||||||
|
*
|
||||||
|
* This function handles the receive interrupt of K64F.
|
||||||
|
*/
|
||||||
|
void enet_mac_rx_isr()
|
||||||
|
{
|
||||||
|
sys_sem_signal(&k64f_enetdata.RxReadySem);
|
||||||
|
}
|
||||||
|
|
||||||
|
void enet_mac_tx_isr()
|
||||||
|
{
|
||||||
|
sys_sem_signal(&k64f_enetdata.TxCleanSem);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ethernet_callback(ENET_Type *base, enet_handle_t *handle, enet_event_t event, void *param)
|
||||||
|
{
|
||||||
|
switch (event)
|
||||||
|
{
|
||||||
|
case kENET_RxEvent:
|
||||||
|
enet_mac_rx_isr();
|
||||||
|
break;
|
||||||
|
case kENET_TxEvent:
|
||||||
|
enet_mac_tx_isr();
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/** \brief Low level init of the MAC and PHY.
|
/** \brief Low level init of the MAC and PHY.
|
||||||
*
|
*
|
||||||
* \param[in] netif Pointer to LWIP netif structure
|
* \param[in] netif Pointer to LWIP netif structure
|
||||||
*/
|
*/
|
||||||
static err_t low_level_init(struct netif *netif)
|
static err_t low_level_init(struct netif *netif)
|
||||||
{
|
{
|
||||||
enet_dev_if_t * enetIfPtr;
|
struct k64f_enetdata *k64f_enet = netif->state;
|
||||||
uint32_t device = BOARD_DEBUG_ENET_INSTANCE_ADDR;
|
uint8_t i;
|
||||||
enet_rxbd_config_t rxbdCfg;
|
uint32_t sysClock;
|
||||||
enet_txbd_config_t txbdCfg;
|
phy_speed_t phy_speed;
|
||||||
enet_phy_speed_t phy_speed;
|
phy_duplex_t phy_duplex;
|
||||||
enet_phy_duplex_t phy_duplex;
|
uint32_t phyAddr = 0;
|
||||||
|
bool link = false;
|
||||||
|
enet_config_t config;
|
||||||
|
|
||||||
|
// Allocate RX descriptors
|
||||||
|
rx_desc_start_addr = (uint8_t *)calloc(1, sizeof(enet_rx_bd_struct_t) * ENET_RX_RING_LEN + ENET_BUFF_ALIGNMENT);
|
||||||
|
if(!rx_desc_start_addr)
|
||||||
|
return ERR_MEM;
|
||||||
|
|
||||||
|
// Allocate TX descriptors
|
||||||
|
tx_desc_start_addr = (uint8_t *)calloc(1, sizeof(enet_tx_bd_struct_t) * ENET_TX_RING_LEN + ENET_BUFF_ALIGNMENT);
|
||||||
|
if(!tx_desc_start_addr)
|
||||||
|
return ERR_MEM;
|
||||||
|
|
||||||
|
rx_desc_start_addr = (uint8_t *)ENET_ALIGN(rx_desc_start_addr, ENET_BUFF_ALIGNMENT);
|
||||||
|
tx_desc_start_addr = (uint8_t *)ENET_ALIGN(tx_desc_start_addr, ENET_BUFF_ALIGNMENT);
|
||||||
|
|
||||||
|
/* Create buffers for each receive BD */
|
||||||
|
for (i = 0; i < ENET_RX_RING_LEN; i++) {
|
||||||
|
rx_buff[i] = pbuf_alloc(PBUF_RAW, ENET_ETH_MAX_FLEN + ENET_BUFF_ALIGNMENT, PBUF_RAM);
|
||||||
|
if (NULL == rx_buff[i])
|
||||||
|
return ERR_MEM;
|
||||||
|
|
||||||
|
/* K64F note: the next line ensures that the RX buffer is properly aligned for the K64F
|
||||||
|
RX descriptors (16 bytes alignment). However, by doing so, we're effectively changing
|
||||||
|
a data structure which is internal to lwIP. This might not prove to be a good idea
|
||||||
|
in the long run, but a better fix would probably involve modifying lwIP itself */
|
||||||
|
rx_buff[i]->payload = (void*)ENET_ALIGN((uint32_t)rx_buff[i]->payload, ENET_BUFF_ALIGNMENT);
|
||||||
|
rx_ptr[i] = rx_buff[i]->payload;
|
||||||
|
}
|
||||||
|
|
||||||
|
k64f_enet->tx_consume_index = k64f_enet->tx_produce_index = 0;
|
||||||
|
|
||||||
|
/* prepare the buffer configuration. */
|
||||||
|
enet_buffer_config_t buffCfg = {
|
||||||
|
ENET_RX_RING_LEN,
|
||||||
|
ENET_TX_RING_LEN,
|
||||||
|
ENET_ALIGN(ENET_ETH_MAX_FLEN, ENET_BUFF_ALIGNMENT),
|
||||||
|
0,
|
||||||
|
(volatile enet_rx_bd_struct_t *)rx_desc_start_addr,
|
||||||
|
(volatile enet_tx_bd_struct_t *)tx_desc_start_addr,
|
||||||
|
(uint8_t *)&rx_ptr,
|
||||||
|
NULL,
|
||||||
|
};
|
||||||
|
|
||||||
k64f_init_eth_hardware();
|
k64f_init_eth_hardware();
|
||||||
|
|
||||||
/* Initialize device*/
|
sysClock = CLOCK_GetFreq(kCLOCK_CoreSysClk);
|
||||||
enetIfPtr = (enet_dev_if_t *)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];
|
|
||||||
enetIfPtr->deviceNumber = device;
|
|
||||||
enetIfPtr->macCfgPtr = &g_enetMacCfg[BOARD_DEBUG_ENET_INSTANCE];
|
|
||||||
enetIfPtr->phyCfgPtr = &g_enetPhyCfg[BOARD_DEBUG_ENET_INSTANCE];
|
|
||||||
enetIfPtr->macApiPtr = &g_enetMacApi;
|
|
||||||
enetIfPtr->phyApiPtr = (void *)&g_enetPhyApi;
|
|
||||||
memcpy(enetIfPtr->macCfgPtr->macAddr, (char*)netif->hwaddr, kEnetMacAddrLen);
|
|
||||||
|
|
||||||
/* Allocate buffer for ENET mac context*/
|
ENET_GetDefaultConfig(&config);
|
||||||
enetIfPtr->macContextPtr = (enet_mac_context_t *)calloc(1, sizeof(enet_mac_context_t));
|
|
||||||
if (!enetIfPtr->macContextPtr) {
|
|
||||||
return ERR_BUF;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Initialize enet buffers*/
|
PHY_Init(ENET, 0, sysClock);
|
||||||
if(k64f_rx_setup(netif, &rxbdCfg) != ERR_OK) {
|
PHY_GetLinkStatus(ENET, phyAddr, &link);
|
||||||
return ERR_BUF;
|
if (link)
|
||||||
}
|
|
||||||
/* Initialize enet buffers*/
|
|
||||||
if(k64f_tx_setup(netif, &txbdCfg) != ERR_OK) {
|
|
||||||
return ERR_BUF;
|
|
||||||
}
|
|
||||||
/* Initialize enet module*/
|
|
||||||
if (enet_mac_init(enetIfPtr, &rxbdCfg, &txbdCfg) == kStatus_ENET_Success)
|
|
||||||
{
|
{
|
||||||
/* Initialize PHY*/
|
/* Get link information from PHY */
|
||||||
if (enetIfPtr->macCfgPtr->isPhyAutoDiscover) {
|
PHY_GetLinkSpeedDuplex(ENET, phyAddr, &phy_speed, &phy_duplex);
|
||||||
if (((enet_phy_api_t *)(enetIfPtr->phyApiPtr))->phy_auto_discover(enetIfPtr) != kStatus_PHY_Success)
|
/* Change the MII speed and duplex for actual link status. */
|
||||||
return ERR_IF;
|
config.miiSpeed = (enet_mii_speed_t)phy_speed;
|
||||||
}
|
config.miiDuplex = (enet_mii_duplex_t)phy_duplex;
|
||||||
if (((enet_phy_api_t *)(enetIfPtr->phyApiPtr))->phy_init(enetIfPtr) != kStatus_PHY_Success)
|
config.interrupt = kENET_RxFrameInterrupt | kENET_TxFrameInterrupt;
|
||||||
return ERR_IF;
|
|
||||||
|
|
||||||
enetIfPtr->isInitialized = true;
|
|
||||||
}
|
}
|
||||||
else
|
config.rxMaxFrameLen = ENET_ETH_MAX_FLEN;
|
||||||
{
|
config.macSpecialConfig = kENET_ControlFlowControlEnable;
|
||||||
// TODOETH: cleanup memory
|
config.txAccelerConfig = kENET_TxAccelIsShift16Enabled;
|
||||||
return ERR_IF;
|
config.rxAccelerConfig = kENET_RxAccelisShift16Enabled | kENET_RxAccelMacCheckEnabled;
|
||||||
}
|
ENET_Init(ENET, &g_handle, &config, &buffCfg, netif->hwaddr, sysClock);
|
||||||
|
ENET_SetCallback(&g_handle, ethernet_callback, netif);
|
||||||
/* Get link information from PHY */
|
ENET_ActiveRead(ENET);
|
||||||
phy_get_link_speed(enetIfPtr, &phy_speed);
|
|
||||||
phy_get_link_duplex(enetIfPtr, &phy_duplex);
|
|
||||||
BW_ENET_RCR_RMII_10T(enetIfPtr->deviceNumber, phy_speed == kEnetSpeed10M ? kEnetCfgSpeed10M : kEnetCfgSpeed100M);
|
|
||||||
BW_ENET_TCR_FDEN(enetIfPtr->deviceNumber, phy_duplex == kEnetFullDuplex ? kEnetCfgFullDuplex : kEnetCfgHalfDuplex);
|
|
||||||
|
|
||||||
/* Enable Ethernet module*/
|
|
||||||
enet_hal_config_ethernet(BOARD_DEBUG_ENET_INSTANCE_ADDR, true, true);
|
|
||||||
|
|
||||||
/* Active Receive buffer descriptor must be done after module enable*/
|
|
||||||
enet_hal_active_rxbd(enetIfPtr->deviceNumber);
|
|
||||||
|
|
||||||
return ERR_OK;
|
return ERR_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
/********************************************************************************
|
|
||||||
* LWIP port
|
|
||||||
********************************************************************************/
|
|
||||||
|
|
||||||
/** \brief Ethernet receive interrupt handler
|
|
||||||
*
|
|
||||||
* This function handles the receive interrupt of K64F.
|
|
||||||
*/
|
|
||||||
void enet_mac_rx_isr(void *enetIfPtr)
|
|
||||||
{
|
|
||||||
/* Clear interrupt */
|
|
||||||
enet_hal_clear_interrupt(((enet_dev_if_t *)enetIfPtr)->deviceNumber, kEnetRxFrameInterrupt);
|
|
||||||
sys_sem_signal(&k64f_enetdata.RxReadySem);
|
|
||||||
}
|
|
||||||
|
|
||||||
void enet_mac_tx_isr(void *enetIfPtr)
|
|
||||||
{
|
|
||||||
/*Clear interrupt*/
|
|
||||||
enet_hal_clear_interrupt(((enet_dev_if_t *)enetIfPtr)->deviceNumber, kEnetTxFrameInterrupt);
|
|
||||||
sys_sem_signal(&k64f_enetdata.TxCleanSem);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This function is the ethernet packet send function. It calls
|
* This function is the ethernet packet send function. It calls
|
||||||
|
@@ -409,11 +261,13 @@ err_t k64f_etharp_output(struct netif *netif, struct pbuf *q, ip_addr_t *ipaddr)
  */
 static struct pbuf *k64f_low_level_input(struct netif *netif, int idx)
 {
-  struct k64f_enetdata *k64f_enet = netif->state;
-  enet_bd_struct_t * bdPtr = (enet_bd_struct_t*)k64f_enet->rx_desc_start_addr;
+  volatile enet_rx_bd_struct_t *bdPtr = g_handle.rxBdCurrent;
   struct pbuf *p = NULL;
-  u32_t length = 0, orig_length;
-  const u16_t err_mask = kEnetRxBdTrunc | kEnetRxBdCrc | kEnetRxBdNoOctet | kEnetRxBdLengthViolation;
+  struct pbuf *temp_rxbuf = NULL;
+  u32_t length = 0;
+  const u16_t err_mask = ENET_BUFFDESCRIPTOR_RX_TRUNC_MASK | ENET_BUFFDESCRIPTOR_RX_CRC_MASK |
+                         ENET_BUFFDESCRIPTOR_RX_NOOCTET_MASK | ENET_BUFFDESCRIPTOR_RX_LENVLIOLATE_MASK;
 
 #ifdef LOCK_RX_THREAD
   /* Get exclusive access */
@@ -421,42 +275,32 @@ static struct pbuf *k64f_low_level_input(struct netif *netif, int idx)
 #endif
 
   /* Determine if a frame has been received */
-  if ((bdPtr[idx].control & err_mask) != 0) {
+  if ((bdPtr->control & err_mask) != 0) {
 #if LINK_STATS
-    if ((bdPtr[idx].control & kEnetRxBdLengthViolation) != 0)
+    if ((bdPtr->control & ENET_BUFFDESCRIPTOR_RX_LENVLIOLATE_MASK) != 0)
       LINK_STATS_INC(link.lenerr);
     else
       LINK_STATS_INC(link.chkerr);
 #endif
     LINK_STATS_INC(link.drop);
-
-    /* Re-queue the same buffer */
-    k64f_enet->rx_free_descs++;
-    p = k64f_enet->rxb[idx];
-    k64f_enet->rxb[idx] = NULL;
-    k64f_rxqueue_pbuf(k64f_enet, p, idx);
-    p = NULL;
+    /* Re-use the same buffer in case of error */
+    update_read_buffer(NULL);
   } else {
     /* A packet is waiting, get length */
-    length = enet_hal_get_bd_length(bdPtr + idx);
+    length = bdPtr->length;
 
     /* Zero-copy */
-    p = k64f_enet->rxb[idx];
-    orig_length = p->len;
-    p->len = (u16_t) length;
-
-    /* Free pbuf from descriptor */
-    k64f_enet->rxb[idx] = NULL;
-    k64f_enet->rx_free_descs++;
+    p = rx_buff[idx];
+    p->len = length;
 
     /* Attempt to queue new buffer */
-    if (k64f_rx_queue(netif, idx) == 0) {
+    temp_rxbuf = pbuf_alloc(PBUF_RAW, ENET_ETH_MAX_FLEN + ENET_BUFF_ALIGNMENT, PBUF_RAM);
+    if (NULL == temp_rxbuf) {
       /* Drop frame (out of memory) */
       LINK_STATS_INC(link.drop);
 
       /* Re-queue the same buffer */
-      p->len = orig_length;
-      k64f_rxqueue_pbuf(k64f_enet, p, idx);
+      update_read_buffer(NULL);
 
       LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
         ("k64f_low_level_input: Packet index %d dropped for OOM\n",
@@ -468,6 +312,15 @@ static struct pbuf *k64f_low_level_input(struct netif *netif, int idx)
       return NULL;
     }
 
+    rx_buff[idx] = temp_rxbuf;
+    /* K64F note: the next line ensures that the RX buffer is properly aligned for the K64F
+       RX descriptors (16 bytes alignment). However, by doing so, we're effectively changing
+       a data structure which is internal to lwIP. This might not prove to be a good idea
+       in the long run, but a better fix would probably involve modifying lwIP itself */
+    rx_buff[idx]->payload = (void*)ENET_ALIGN((uint32_t)rx_buff[idx]->payload, ENET_BUFF_ALIGNMENT);
+    rx_ptr[idx] = rx_buff[idx]->payload;
+
+    update_read_buffer(rx_buff[idx]->payload);
     LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
       ("k64f_low_level_input: Packet received: %p, size %d (index=%d)\n",
       p, length, idx));
@@ -533,14 +386,13 @@ void k64f_enetif_input(struct netif *netif, int idx)
  */
 static void packet_rx(void* pvParameters) {
   struct k64f_enetdata *k64f_enet = pvParameters;
-  volatile enet_bd_struct_t * bdPtr = (enet_bd_struct_t*)k64f_enet->rx_desc_start_addr;
   int idx = 0;
 
   while (1) {
     /* Wait for receive task to wakeup */
     sys_arch_sem_wait(&k64f_enet->RxReadySem, 0);
 
-    while ((bdPtr[idx].control & kEnetRxBdEmpty) == 0) {
+    while ((g_handle.rxBdCurrent->control & ENET_BUFFDESCRIPTOR_RX_EMPTY_MASK) == 0) {
       k64f_enetif_input(k64f_enet->netif, idx);
       idx = (idx + 1) % ENET_RX_RING_LEN;
     }
@ -561,58 +413,10 @@ static void packet_tx(void* pvParameters) {
|
||||||
while (1) {
|
while (1) {
|
||||||
/* Wait for transmit cleanup task to wakeup */
|
/* Wait for transmit cleanup task to wakeup */
|
||||||
sys_arch_sem_wait(&k64f_enet->TxCleanSem, 0);
|
sys_arch_sem_wait(&k64f_enet->TxCleanSem, 0);
|
||||||
// TODOETH: handle TX underrun?
|
|
||||||
k64f_tx_reclaim(k64f_enet);
|
k64f_tx_reclaim(k64f_enet);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/** \brief Polls if an available TX descriptor is ready. Can be used to
|
|
||||||
* determine if the low level transmit function will block.
|
|
||||||
*
|
|
||||||
* \param[in] netif the lwip network interface structure
|
|
||||||
* \return 0 if no descriptors are read, or >0
|
|
||||||
*/
|
|
||||||
s32_t k64f_tx_ready(struct netif *netif)
|
|
||||||
{
|
|
||||||
struct k64f_enetdata *k64f_enet = netif->state;
|
|
||||||
s32_t fb;
|
|
||||||
u32_t idx, cidx;
|
|
||||||
|
|
||||||
cidx = k64f_enet->tx_consume_index;
|
|
||||||
idx = k64f_enet->tx_produce_index;
|
|
||||||
|
|
||||||
/* Determine number of free buffers */
|
|
||||||
if (idx == cidx)
|
|
||||||
fb = ENET_TX_RING_LEN;
|
|
||||||
else if (cidx > idx)
|
|
||||||
fb = (ENET_TX_RING_LEN - 1) -
|
|
||||||
((idx + ENET_TX_RING_LEN) - cidx);
|
|
||||||
else
|
|
||||||
fb = (ENET_TX_RING_LEN - 1) - (cidx - idx);
|
|
||||||
|
|
||||||
return fb;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*FUNCTION****************************************************************
|
|
||||||
*
|
|
||||||
* Function Name: enet_hal_update_txbds
|
|
||||||
* Description: Update ENET transmit buffer descriptors.
|
|
||||||
*END*********************************************************************/
|
|
||||||
void k64f_update_txbds(struct k64f_enetdata *k64f_enet, int idx, uint8_t *buffer, uint16_t length, bool isLast)
|
|
||||||
{
|
|
||||||
volatile enet_bd_struct_t * bdPtr = (enet_bd_struct_t *)(k64f_enet->tx_desc_start_addr + idx * enet_hal_get_bd_size());
|
|
||||||
|
|
||||||
bdPtr->length = HTONS(length); /* Set data length*/
|
|
||||||
bdPtr->buffer = (uint8_t *)HTONL((uint32_t)buffer); /* Set data buffer*/
|
|
||||||
if (isLast)
|
|
||||||
bdPtr->control |= kEnetTxBdLast;
|
|
||||||
else
|
|
||||||
bdPtr->control &= ~kEnetTxBdLast;
|
|
||||||
bdPtr->controlExtend1 |= kEnetTxBdTxInterrupt;
|
|
||||||
bdPtr->controlExtend2 &= ~TX_DESC_UPDATED_MASK; // descriptor not updated by DMA
|
|
||||||
bdPtr->control |= kEnetTxBdTransmitCrc | kEnetTxBdReady;
|
|
||||||
}
|
|
||||||
|
|
||||||
/** \brief Low level output of a packet. Never call this from an
|
/** \brief Low level output of a packet. Never call this from an
|
||||||
* interrupt context, as it may block until TX descriptors
|
* interrupt context, as it may block until TX descriptors
|
||||||
* become available.
|
* become available.
|
||||||
|
@ -625,83 +429,51 @@ static err_t k64f_low_level_output(struct netif *netif, struct pbuf *p)
|
||||||
{
|
{
|
||||||
struct k64f_enetdata *k64f_enet = netif->state;
|
struct k64f_enetdata *k64f_enet = netif->state;
|
||||||
struct pbuf *q;
|
struct pbuf *q;
|
||||||
u32_t idx;
|
struct pbuf *temp_pbuf;
|
||||||
s32_t dn;
|
|
||||||
uint8_t *psend = NULL, *dst;
|
uint8_t *psend = NULL, *dst;
|
||||||
|
|
||||||
/* Get free TX buffer index */
|
|
||||||
idx = k64f_enet->tx_produce_index;
|
|
||||||
|
|
||||||
/* Check the pbuf chain for payloads that are not 8-byte aligned.
|
temp_pbuf = pbuf_alloc(PBUF_RAW, p->tot_len + ENET_BUFF_ALIGNMENT, PBUF_RAM);
|
||||||
If found, a new properly aligned buffer needs to be allocated
|
if (NULL == temp_pbuf)
|
||||||
and the data copied there */
|
return ERR_MEM;
|
||||||
for (q = p; q != NULL; q = q->next)
|
|
||||||
if (((u32_t)q->payload & (TX_BUF_ALIGNMENT - 1)) != 0)
|
/* K64F note: the next line ensures that the RX buffer is properly aligned for the K64F
|
||||||
break;
|
RX descriptors (16 bytes alignment). However, by doing so, we're effectively changing
|
||||||
if (q != NULL) {
|
a data structure which is internal to lwIP. This might not prove to be a good idea
|
||||||
// Allocate properly aligned buffer
|
in the long run, but a better fix would probably involve modifying lwIP itself */
|
||||||
psend = (uint8_t*)malloc(p->tot_len);
|
psend = (uint8_t *)ENET_ALIGN((uint32_t)temp_pbuf->payload, ENET_BUFF_ALIGNMENT);
|
||||||
if (NULL == psend)
|
|
||||||
return ERR_MEM;
|
for (q = p, dst = psend; q != NULL; q = q->next) {
|
||||||
LWIP_ASSERT("k64f_low_level_output: buffer not properly aligned", ((u32_t)psend & (TX_BUF_ALIGNMENT - 1)) == 0);
|
MEMCPY(dst, q->payload, q->len);
|
||||||
for (q = p, dst = psend; q != NULL; q = q->next) {
|
dst += q->len;
|
||||||
MEMCPY(dst, q->payload, q->len);
|
|
||||||
dst += q->len;
|
|
||||||
}
|
|
||||||
k64f_enet->txb_aligned[idx] = psend;
|
|
||||||
dn = 1;
|
|
||||||
} else {
|
|
||||||
k64f_enet->txb_aligned[idx] = NULL;
|
|
||||||
dn = (s32_t) pbuf_clen(p);
|
|
||||||
pbuf_ref(p);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Wait until enough descriptors are available for the transfer. */
|
/* Wait until a descriptor is available for the transfer. */
|
||||||
/* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
|
/* THIS WILL BLOCK UNTIL THERE ARE A DESCRIPTOR AVAILABLE */
|
||||||
while (dn > k64f_tx_ready(netif))
|
while (g_handle.txBdCurrent->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK)
|
||||||
osSemaphoreWait(k64f_enet->xTXDCountSem.id, osWaitForever);
|
osSemaphoreWait(k64f_enet->xTXDCountSem.id, osWaitForever);
|
||||||
|
|
||||||
/* Get exclusive access */
|
/* Get exclusive access */
|
||||||
sys_mutex_lock(&k64f_enet->TXLockMutex);
|
sys_mutex_lock(&k64f_enet->TXLockMutex);
|
||||||
|
|
||||||
|
/* Save the buffer so that it can be freed when transmit is done */
|
||||||
|
tx_buff[k64f_enet->tx_produce_index] = temp_pbuf;
|
||||||
|
k64f_enet->tx_produce_index = (k64f_enet->tx_produce_index + 1) % ENET_TX_RING_LEN;
|
||||||
|
|
||||||
/* Setup transfers */
|
/* Setup transfers */
|
||||||
q = p;
|
g_handle.txBdCurrent->buffer = psend;
|
||||||
while (dn > 0) {
|
g_handle.txBdCurrent->length = p->tot_len;
|
||||||
dn--;
|
g_handle.txBdCurrent->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);
|
||||||
if (psend != NULL) {
|
|
||||||
k64f_update_txbds(k64f_enet, idx, psend, p->tot_len, 1);
|
|
||||||
k64f_enet->txb[idx] = NULL;
|
|
||||||
|
|
||||||
LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
|
/* Increase the buffer descriptor address. */
|
||||||
("k64f_low_level_output: aligned packet(%p) sent"
|
if (g_handle.txBdCurrent->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
|
||||||
" size = %d (index=%d)\n", psend, p->tot_len, idx));
|
g_handle.txBdCurrent = g_handle.txBdBase;
|
||||||
} else {
|
else
|
||||||
LWIP_ASSERT("k64f_low_level_output: buffer not properly aligned", ((u32_t)q->payload & 0x07) == 0);
|
g_handle.txBdCurrent++;
|
||||||
|
|
||||||
/* Only save pointer to free on last descriptor */
|
/* Active the transmit buffer descriptor. */
|
||||||
if (dn == 0) {
|
ENET->TDAR = ENET_TDAR_TDAR_MASK;
|
||||||
/* Save size of packet and signal it's ready */
|
|
||||||
k64f_update_txbds(k64f_enet, idx, q->payload, q->len, 1);
|
|
||||||
k64f_enet->txb[idx] = p;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
/* Save size of packet, descriptor is not last */
|
|
||||||
k64f_update_txbds(k64f_enet, idx, q->payload, q->len, 0);
|
|
||||||
k64f_enet->txb[idx] = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
|
|
||||||
("k64f_low_level_output: pbuf packet(%p) sent, chain#=%d,"
|
|
||||||
" size = %d (index=%d)\n", q->payload, dn, q->len, idx));
|
|
||||||
}
|
|
||||||
|
|
||||||
q = q->next;
|
|
||||||
|
|
||||||
idx = (idx + 1) % ENET_TX_RING_LEN;
|
|
||||||
}
|
|
||||||
|
|
||||||
k64f_enet->tx_produce_index = idx;
|
|
||||||
enet_hal_active_txbd(BOARD_DEBUG_ENET_INSTANCE_ADDR);
|
|
||||||
LINK_STATS_INC(link.xmit);
|
LINK_STATS_INC(link.xmit);
|
||||||
|
|
||||||
/* Restore access */
|
/* Restore access */
|
||||||
|
@@ -719,31 +491,33 @@ static err_t k64f_low_level_output(struct netif *netif, struct pbuf *p)
 
 typedef struct {
   int connected;
-  enet_phy_speed_t speed;
-  enet_phy_duplex_t duplex;
+  phy_speed_t speed;
+  phy_duplex_t duplex;
 } PHY_STATE;
 
 int phy_link_status() {
   bool connection_status;
-  enet_dev_if_t * enetIfPtr = (enet_dev_if_t*)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];
-  phy_get_link_status(enetIfPtr, &connection_status);
+  uint32_t phyAddr = 0;
+
+  PHY_GetLinkStatus(ENET, phyAddr, &connection_status);
   return (int)connection_status;
 }
 
 static void k64f_phy_task(void *data) {
   struct netif *netif = (struct netif*)data;
   bool connection_status;
-  enet_dev_if_t * enetIfPtr = (enet_dev_if_t*)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];
-  PHY_STATE crt_state = {STATE_UNKNOWN, (enet_phy_speed_t)STATE_UNKNOWN, (enet_phy_duplex_t)STATE_UNKNOWN};
+  PHY_STATE crt_state = {STATE_UNKNOWN, (phy_speed_t)STATE_UNKNOWN, (phy_duplex_t)STATE_UNKNOWN};
   PHY_STATE prev_state;
+  uint32_t phyAddr = 0;
+  uint32_t rcr = 0;
 
   prev_state = crt_state;
   while (true) {
     // Get current status
-    phy_get_link_status(enetIfPtr, &connection_status);
+    PHY_GetLinkStatus(ENET, phyAddr, &connection_status);
     crt_state.connected = connection_status ? 1 : 0;
-    phy_get_link_speed(enetIfPtr, &crt_state.speed);
-    phy_get_link_duplex(enetIfPtr, &crt_state.duplex);
+    // Get the actual PHY link speed
+    PHY_GetLinkSpeedDuplex(ENET, phyAddr, &crt_state.speed, &crt_state.duplex);
 
     // Compare with previous state
     if (crt_state.connected != prev_state.connected) {
@@ -753,10 +527,12 @@ static void k64f_phy_task(void *data) {
       tcpip_callback_with_block((tcpip_callback_fn)netif_set_link_down, (void*) netif, 1);
     }
 
-    if (crt_state.speed != prev_state.speed)
-      BW_ENET_RCR_RMII_10T(enetIfPtr->deviceNumber, crt_state.speed == kEnetSpeed10M ? kEnetCfgSpeed10M : kEnetCfgSpeed100M);
-
-    // TODO: duplex change requires disable/enable of Ethernet interface, to be implemented
+    if (crt_state.speed != prev_state.speed) {
+      rcr = ENET->RCR;
+      rcr &= ~ENET_RCR_RMII_10T_MASK;
+      rcr |= ENET_RCR_RMII_10T(!crt_state.speed);
+      ENET->RCR = rcr;
+    }
 
     prev_state = crt_state;
     osDelay(PHY_TASK_PERIOD_MS);
@@ -851,32 +627,14 @@ err_t eth_arch_enetif_init(struct netif *netif)
 }
 
 void eth_arch_enable_interrupts(void) {
-  enet_hal_config_interrupt(BOARD_DEBUG_ENET_INSTANCE_ADDR, (kEnetTxFrameInterrupt | kEnetRxFrameInterrupt), true);
-  INT_SYS_EnableIRQ(enet_irq_ids[BOARD_DEBUG_ENET_INSTANCE][enetIntMap[kEnetRxfInt]]);
-  INT_SYS_EnableIRQ(enet_irq_ids[BOARD_DEBUG_ENET_INSTANCE][enetIntMap[kEnetTxfInt]]);
+  //NVIC_SetPriority(ENET_Receive_IRQn, 6U);
+  //NVIC_SetPriority(ENET_Transmit_IRQn, 6U);
 }
 
 void eth_arch_disable_interrupts(void) {
-  INT_SYS_DisableIRQ(enet_irq_ids[BOARD_DEBUG_ENET_INSTANCE][enetIntMap[kEnetRxfInt]]);
-  INT_SYS_DisableIRQ(enet_irq_ids[BOARD_DEBUG_ENET_INSTANCE][enetIntMap[kEnetTxfInt]]);
 }
 
-void ENET_Transmit_IRQHandler(void)
-{
-  enet_mac_tx_isr(enetIfHandle);
-}
-
-void ENET_Receive_IRQHandler(void)
-{
-  enet_mac_rx_isr(enetIfHandle);
-}
-
-#if FSL_FEATURE_ENET_SUPPORT_PTP
-void ENET_1588_Timer_IRQHandler(void)
-{
-  enet_mac_ts_isr(enetIfHandle);
-}
-#endif
 /**
  * @}
  */
@@ -30,17 +30,10 @@
 #ifndef K64F_EMAC_CONFIG_H__
 #define K64F_EMAC_CONFIG_H__
 
+#include "fsl_enet.h"
+
 #define ENET_RX_RING_LEN (16)
 #define ENET_TX_RING_LEN (8)
-#define ENET_RX_LARGE_BUFFER_NUM (0)
-#define ENET_RX_BUFFER_ALIGNMENT (16)
-#define ENET_TX_BUFFER_ALIGNMENT (16)
-#define ENET_BD_ALIGNMENT (16)
-#define ENET_MII_CLOCK (2500000L)
-#define RX_BUF_ALIGNMENT (16)
-#define TX_BUF_ALIGNMENT (8)
-#define BOARD_DEBUG_ENET_INSTANCE (0)
-#define BOARD_DEBUG_ENET_INSTANCE_ADDR (ENET_BASE)
 
 #define ENET_ETH_MAX_FLEN (1522) // recommended size for a VLAN frame
 
@@ -24,6 +24,6 @@
 #define LWIP_TRANSPORT_ETHERNET 1
 #define ETH_PAD_SIZE 2
 
-#define MEM_SIZE (ENET_RX_RING_LEN * (ENET_ETH_MAX_FLEN + RX_BUF_ALIGNMENT) + ENET_TX_RING_LEN * ENET_ETH_MAX_FLEN)
+#define MEM_SIZE (ENET_RX_RING_LEN * (ENET_ETH_MAX_FLEN + ENET_BUFF_ALIGNMENT) + ENET_TX_RING_LEN * ENET_ETH_MAX_FLEN)
 
 #endif
@@ -0,0 +1,26 @@
+/* Copyright (C) 2012 mbed.org, MIT License
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+ * and associated documentation files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge, publish, distribute,
+ * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or
+ * substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef LWIPOPTS_CONF_H
+#define LWIPOPTS_CONF_H
+
+#define LWIP_TRANSPORT_ETHERNET 1
+
+#define MEM_SIZE (1600 * 16)
+
+#endif
@ -0,0 +1,194 @@
|
||||||
|
#include "lwip/opt.h"
|
||||||
|
#include "lwip/tcpip.h"
|
||||||
|
#include "netif/etharp.h"
|
||||||
|
#include "mbed_interface.h"
|
||||||
|
#include "ethernet_api.h"
|
||||||
|
#include "ethernetext_api.h"
|
||||||
|
|
||||||
|
#define RECV_TASK_PRI (osPriorityNormal)
|
||||||
|
#define PHY_TASK_PRI (osPriorityNormal)
|
||||||
|
#define PHY_TASK_WAIT (200)
|
||||||
|
|
||||||
|
/* memory */
|
||||||
|
static sys_sem_t recv_ready_sem; /* receive ready semaphore */
|
||||||
|
|
||||||
|
/* function */
|
||||||
|
static void rza1_recv_task(void *arg);
|
||||||
|
static void rza1_phy_task(void *arg);
|
||||||
|
static err_t rza1_etharp_output(struct netif *netif, struct pbuf *q, ip_addr_t *ipaddr);
|
||||||
|
static err_t rza1_low_level_output(struct netif *netif, struct pbuf *p);
|
||||||
|
static void rza1_recv_callback(void);
|
||||||
|
|
||||||
|
static void rza1_recv_task(void *arg) {
|
||||||
|
struct netif *netif = (struct netif*)arg;
|
||||||
|
struct eth_hdr *ethhdr;
|
||||||
|
u16_t recv_size;
|
||||||
|
struct pbuf *p;
|
||||||
|
int cnt;
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
sys_arch_sem_wait(&recv_ready_sem, 0);
|
||||||
|
for (cnt = 0; cnt < 16; cnt++) {
|
||||||
|
recv_size = ethernet_receive();
|
||||||
|
if (recv_size != 0) {
|
||||||
|
p = pbuf_alloc(PBUF_RAW, recv_size, PBUF_RAM);
|
||||||
|
if (p != NULL) {
|
||||||
|
(void)ethernet_read((char *)p->payload, p->len);
|
||||||
|
ethhdr = p->payload;
|
||||||
|
switch (htons(ethhdr->type)) {
|
||||||
|
case ETHTYPE_IP:
|
||||||
|
case ETHTYPE_ARP:
|
||||||
|
#if PPPOE_SUPPORT
|
||||||
|
case ETHTYPE_PPPOEDISC:
|
||||||
|
case ETHTYPE_PPPOE:
|
||||||
|
#endif /* PPPOE_SUPPORT */
|
||||||
|
/* full packet send to tcpip_thread to process */
|
||||||
|
if (netif->input(p, netif) != ERR_OK) {
|
||||||
|
/* Free buffer */
|
||||||
|
pbuf_free(p);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
/* Return buffer */
|
||||||
|
pbuf_free(p);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void rza1_phy_task(void *arg) {
|
||||||
|
struct netif *netif = (struct netif*)arg;
|
||||||
|
s32_t connect_sts = 0; /* 0: disconnect, 1:connect */
|
||||||
|
s32_t link_sts;
|
||||||
|
s32_t link_mode_new = NEGO_FAIL;
|
||||||
|
s32_t link_mode_old = NEGO_FAIL;
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
link_sts = ethernet_link();
|
||||||
|
if (link_sts == 1) {
|
||||||
|
link_mode_new = ethernetext_chk_link_mode();
|
||||||
|
if (link_mode_new != link_mode_old) {
|
||||||
|
if (connect_sts == 1) {
|
||||||
|
tcpip_callback_with_block((tcpip_callback_fn)netif_set_link_down, (void*) netif, 1);
|
||||||
|
}
|
||||||
|
if (link_mode_new != NEGO_FAIL) {
|
||||||
|
ethernetext_set_link_mode(link_mode_new);
|
||||||
|
tcpip_callback_with_block((tcpip_callback_fn)netif_set_link_up, (void*) netif, 1);
|
||||||
|
connect_sts = 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (connect_sts != 0) {
|
||||||
|
tcpip_callback_with_block((tcpip_callback_fn)netif_set_link_down, (void*) netif, 1);
|
||||||
|
link_mode_new = NEGO_FAIL;
|
||||||
|
connect_sts = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
link_mode_old = link_mode_new;
|
||||||
|
osDelay(PHY_TASK_WAIT);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static err_t rza1_etharp_output(struct netif *netif, struct pbuf *q, ip_addr_t *ipaddr) {
|
||||||
|
/* Only send packet is link is up */
|
||||||
|
if (netif->flags & NETIF_FLAG_LINK_UP) {
|
||||||
|
return etharp_output(netif, q, ipaddr);
|
||||||
|
}
|
||||||
|
|
||||||
|
return ERR_CONN;
|
||||||
|
}
|
||||||
|
|
||||||
|
static err_t rza1_low_level_output(struct netif *netif, struct pbuf *p) {
|
||||||
|
struct pbuf *q;
|
||||||
|
s32_t cnt;
|
||||||
|
err_t err = ERR_MEM;
|
||||||
|
s32_t write_size = 0;
|
||||||
|
|
||||||
|
if ((p->payload != NULL) && (p->len != 0)) {
|
||||||
|
/* If the first data can't be written, transmit descriptor is full. */
|
||||||
|
for (cnt = 0; cnt < 100; cnt++) {
|
||||||
|
write_size = ethernet_write((char *)p->payload, p->len);
|
||||||
|
if (write_size != 0) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
osDelay(1);
|
||||||
|
}
|
||||||
|
if (write_size != 0) {
|
||||||
|
for (q = p->next; q != NULL; q = q->next) {
|
||||||
|
(void)ethernet_write((char *)q->payload, q->len);
|
||||||
|
}
|
||||||
|
if (ethernet_send() == 1) {
|
||||||
|
err = ERR_OK;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void rza1_recv_callback(void) {
|
||||||
|
sys_sem_signal(&recv_ready_sem);
|
||||||
|
}
|
||||||
|
|
||||||
|
err_t eth_arch_enetif_init(struct netif *netif)
|
||||||
|
{
|
||||||
|
ethernet_cfg_t ethcfg;
|
||||||
|
|
||||||
|
/* set MAC hardware address */
|
||||||
|
#if (MBED_MAC_ADDRESS_SUM != MBED_MAC_ADDR_INTERFACE)
|
||||||
|
netif->hwaddr[0] = MBED_MAC_ADDR_0;
|
||||||
|
netif->hwaddr[1] = MBED_MAC_ADDR_1;
|
||||||
|
netif->hwaddr[2] = MBED_MAC_ADDR_2;
|
||||||
|
netif->hwaddr[3] = MBED_MAC_ADDR_3;
|
||||||
|
netif->hwaddr[4] = MBED_MAC_ADDR_4;
|
||||||
|
netif->hwaddr[5] = MBED_MAC_ADDR_5;
|
||||||
|
#else
|
||||||
|
mbed_mac_address((char *)netif->hwaddr);
|
||||||
|
#endif
|
||||||
|
netif->hwaddr_len = ETHARP_HWADDR_LEN;
|
||||||
|
|
||||||
|
/* maximum transfer unit */
|
||||||
|
netif->mtu = 1500;
|
||||||
|
|
||||||
|
/* device capabilities */
|
||||||
|
netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET | NETIF_FLAG_IGMP;
|
||||||
|
|
||||||
|
#if LWIP_NETIF_HOSTNAME
|
||||||
|
/* Initialize interface hostname */
|
||||||
|
netif->hostname = "lwiprza1";
|
||||||
|
#endif /* LWIP_NETIF_HOSTNAME */
|
||||||
|
|
||||||
|
netif->name[0] = 'e';
|
||||||
|
netif->name[1] = 'n';
|
||||||
|
|
||||||
|
netif->output = rza1_etharp_output;
|
||||||
|
netif->linkoutput = rza1_low_level_output;
|
||||||
|
|
||||||
|
/* Initialize the hardware */
|
||||||
|
ethcfg.int_priority = 6;
|
||||||
|
ethcfg.recv_cb = &rza1_recv_callback;
|
||||||
|
ethcfg.ether_mac = (char *)netif->hwaddr;
|
||||||
|
ethernetext_init(ðcfg);
|
||||||
|
|
||||||
|
/* semaphore */
|
||||||
|
sys_sem_new(&recv_ready_sem, 0);
|
||||||
|
|
||||||
|
/* task */
|
||||||
|
sys_thread_new("rza1_recv_task", rza1_recv_task, netif, DEFAULT_THREAD_STACKSIZE, RECV_TASK_PRI);
|
||||||
|
sys_thread_new("rza1_phy_task", rza1_phy_task, netif, DEFAULT_THREAD_STACKSIZE, PHY_TASK_PRI);
|
||||||
|
|
||||||
|
return ERR_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
void eth_arch_enable_interrupts(void) {
|
||||||
|
ethernetext_start_stop(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
void eth_arch_disable_interrupts(void) {
|
||||||
|
ethernetext_start_stop(0);
|
||||||
|
}
|