Merge branch 'axienet-fixes'
Robert Hancock says:

====================
Xilinx axienet fixes

Various fixes for the Xilinx AXI Ethernet driver.

Changes since v2:
- added Reviewed-by tags and some explanation to the commit
  messages; no code changes

Changes since v1:
- corrected a Fixes tag to point to the mainline commit
- split up the reset changes into 3 patches
- added a ratelimit on the netdev_warn in the TX busy case
====================

Signed-off-by: David S. Miller <[email protected]>
davem330 committed Jan 19, 2022
2 parents 9984522 + 2d19c3f commit 8c8963b
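
The reset rework in the diff below replaces an open-coded udelay() countdown with the kernel's read_poll_timeout() helper from <linux/iopoll.h>, which bounds the wait and returns -ETIMEDOUT on expiry. A minimal sketch of that pattern, assuming a hypothetical device with a my_reg_read() accessor, MY_CTRL_REG register and MY_RESET_MASK status bit (none of these names are part of this driver):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

struct my_dev {
	void __iomem *regs;	/* hypothetical MMIO region */
};

#define MY_CTRL_REG	0x00
#define MY_RESET_MASK	BIT(0)

/* Accessor invented for illustration; read_poll_timeout() calls it as
 * my_reg_read(mdev, MY_CTRL_REG) and stores the result in 'value'.
 */
static u32 my_reg_read(struct my_dev *mdev, off_t reg)
{
	return ioread32(mdev->regs + reg);
}

static int my_dev_wait_reset_done(struct my_dev *mdev)
{
	u32 value;

	/* Poll every 1000 us, give up after 50000 us; returns 0 once the
	 * condition becomes true, -ETIMEDOUT otherwise.
	 */
	return read_poll_timeout(my_reg_read, value,
				 !(value & MY_RESET_MASK),
				 1000, 50000, false, mdev, MY_CTRL_REG);
}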
Showing 1 changed file with 84 additions and 51 deletions.
drivers/net/ethernet/xilinx/xilinx_axienet_main.c (135 changes: 84 additions, 51 deletions)
@@ -41,8 +41,9 @@
 #include "xilinx_axienet.h"
 
 /* Descriptors defines for Tx and Rx DMA */
-#define TX_BD_NUM_DEFAULT		64
+#define TX_BD_NUM_DEFAULT		128
 #define RX_BD_NUM_DEFAULT		1024
+#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
 #define TX_BD_NUM_MAX			4096
 #define RX_BD_NUM_MAX			4096
 
@@ -496,7 +497,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
 
 static int __axienet_device_reset(struct axienet_local *lp)
 {
-	u32 timeout;
+	u32 value;
+	int ret;
 
 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 	 * process of Axi DMA takes a while to complete as all pending
@@ -506,15 +508,23 @@ static int __axienet_device_reset(struct axienet_local *lp)
 	 * they both reset the entire DMA core, so only one needs to be used.
 	 */
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
-	timeout = DELAY_OF_ONE_MILLISEC;
-	while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
-	       XAXIDMA_CR_RESET_MASK) {
-		udelay(1);
-		if (--timeout == 0) {
-			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
-				   __func__);
-			return -ETIMEDOUT;
-		}
+	ret = read_poll_timeout(axienet_dma_in32, value,
+				!(value & XAXIDMA_CR_RESET_MASK),
+				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+				XAXIDMA_TX_CR_OFFSET);
+	if (ret) {
+		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
+		return ret;
+	}
+
+	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
+	ret = read_poll_timeout(axienet_ior, value,
+				value & XAE_INT_PHYRSTCMPLT_MASK,
+				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+				XAE_IS_OFFSET);
+	if (ret) {
+		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
+		return ret;
 	}
 
 	return 0;
@@ -623,6 +633,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			break;
 
+		/* Ensure we see complete descriptor update */
+		dma_rmb();
 		phys = desc_get_phys_addr(lp, cur_p);
 		dma_unmap_single(ndev->dev.parent, phys,
 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
@@ -631,13 +643,15 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			dev_consume_skb_irq(cur_p->skb);
 
-		cur_p->cntrl = 0;
 		cur_p->app0 = 0;
 		cur_p->app1 = 0;
 		cur_p->app2 = 0;
 		cur_p->app4 = 0;
-		cur_p->status = 0;
 		cur_p->skb = NULL;
+		/* ensure our transmit path and device don't prematurely see status cleared */
+		wmb();
+		cur_p->cntrl = 0;
+		cur_p->status = 0;
 
 		if (sizep)
 			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
@@ -646,6 +660,32 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 	return i;
 }
 
+/**
+ * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
+ * @lp:		Pointer to the axienet_local structure
+ * @num_frag:	The number of BDs to check for
+ *
+ * Return: 0, on success
+ *	    NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is invoked before BDs are allocated and transmission starts.
+ * This function returns 0 if a BD or group of BDs can be allocated for
+ * transmission. If the BD or any of the BDs are not free the function
+ * returns a busy status. This is invoked from axienet_start_xmit.
+ */
+static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+					    int num_frag)
+{
+	struct axidma_bd *cur_p;
+
+	/* Ensure we see all descriptor updates from device or TX IRQ path */
+	rmb();
+	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+	if (cur_p->cntrl)
+		return NETDEV_TX_BUSY;
+	return 0;
+}
+
 /**
  * axienet_start_xmit_done - Invoked once a transmit is completed by the
  * Axi DMA Tx channel.
@@ -675,30 +715,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
 	/* Matches barrier in axienet_start_xmit */
 	smp_mb();
 
-	netif_wake_queue(ndev);
-}
-
-/**
- * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
- * @lp:		Pointer to the axienet_local structure
- * @num_frag:	The number of BDs to check for
- *
- * Return: 0, on success
- *	    NETDEV_TX_BUSY, if any of the descriptors are not free
- *
- * This function is invoked before BDs are allocated and transmission starts.
- * This function returns 0 if a BD or group of BDs can be allocated for
- * transmission. If the BD or any of the BDs are not free the function
- * returns a busy status. This is invoked from axienet_start_xmit.
- */
-static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
-					    int num_frag)
-{
-	struct axidma_bd *cur_p;
-	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
-	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
-		return NETDEV_TX_BUSY;
-	return 0;
+	if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+		netif_wake_queue(ndev);
 }
 
 /**
@@ -730,20 +748,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	num_frag = skb_shinfo(skb)->nr_frags;
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
-	if (axienet_check_tx_bd_space(lp, num_frag)) {
-		if (netif_queue_stopped(ndev))
-			return NETDEV_TX_BUSY;
-
+	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
+		/* Should not happen as last start_xmit call should have
+		 * checked for sufficient space and queue should only be
+		 * woken when sufficient space is available.
+		 */
 		netif_stop_queue(ndev);
-
-		/* Matches barrier in axienet_start_xmit_done */
-		smp_mb();
-
-		/* Space might have just been freed - check again */
-		if (axienet_check_tx_bd_space(lp, num_frag))
-			return NETDEV_TX_BUSY;
-
-		netif_wake_queue(ndev);
+		if (net_ratelimit())
+			netdev_warn(ndev, "TX ring unexpectedly full\n");
+		return NETDEV_TX_BUSY;
 	}
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -804,6 +817,18 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (++lp->tx_bd_tail >= lp->tx_bd_num)
 		lp->tx_bd_tail = 0;
 
+	/* Stop queue if next transmit may not have space */
+	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+		netif_stop_queue(ndev);
+
+		/* Matches barrier in axienet_start_xmit_done */
+		smp_mb();
+
+		/* Space might have just been freed - check again */
+		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+			netif_wake_queue(ndev);
+	}
+
 	return NETDEV_TX_OK;
 }
 
@@ -834,6 +859,8 @@ static void axienet_recv(struct net_device *ndev)
 
 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 
+		/* Ensure we see complete descriptor update */
+		dma_rmb();
 		phys = desc_get_phys_addr(lp, cur_p);
 		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
 				 DMA_FROM_DEVICE);
@@ -1352,7 +1379,8 @@ axienet_ethtools_set_ringparam(struct net_device *ndev,
 	if (ering->rx_pending > RX_BD_NUM_MAX ||
 	    ering->rx_mini_pending ||
 	    ering->rx_jumbo_pending ||
-	    ering->rx_pending > TX_BD_NUM_MAX)
+	    ering->tx_pending < TX_BD_NUM_MIN ||
+	    ering->tx_pending > TX_BD_NUM_MAX)
 		return -EINVAL;
 
 	if (netif_running(ndev))
@@ -2027,6 +2055,11 @@ static int axienet_probe(struct platform_device *pdev)
 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
 
+	/* Reset core now that clocks are enabled, prior to accessing MDIO */
+	ret = __axienet_device_reset(lp);
+	if (ret)
+		goto cleanup_clk;
+
 	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
 	if (lp->phy_node) {
 		ret = axienet_mdio_setup(lp);

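The smp_mb() pairing in the xmit and completion paths above implements the usual lock-free stop/wake protocol for TX rings: the producer stops the queue when a worst-case frame (MAX_SKB_FRAGS + 1 descriptors) might not fit, the consumer wakes it only once that much space is free again, and the paired barriers close the window where each side could miss the other's update. A distilled sketch of the protocol, with my_ring, ring_space(), queue_frame() and reclaim_completed() invented for illustration (they are not part of this driver):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_ring;				/* hypothetical per-device TX ring state */
int ring_space(struct my_ring *ring);	/* free descriptors (hypothetical helper) */
void queue_frame(struct my_ring *ring, struct sk_buff *skb);	/* hypothetical */
void reclaim_completed(struct my_ring *ring);			/* hypothetical */

/* Producer side (.ndo_start_xmit): queue the frame, then stop the queue
 * while there may not be room for a worst-case follow-up frame.
 */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct my_ring *ring = netdev_priv(ndev);

	if (ring_space(ring) < skb_shinfo(skb)->nr_frags + 1) {
		/* Should not happen: the queue stops before the ring fills */
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	queue_frame(ring, skb);

	if (ring_space(ring) < MAX_SKB_FRAGS + 1) {
		netif_stop_queue(ndev);
		/* Matches the barrier in my_tx_done() */
		smp_mb();
		/* Completion may have freed space since the check above */
		if (ring_space(ring) >= MAX_SKB_FRAGS + 1)
			netif_wake_queue(ndev);
	}
	return NETDEV_TX_OK;
}

/* Consumer side (TX completion): free descriptors, then wake the queue
 * only once a worst-case frame fits again.
 */
static void my_tx_done(struct net_device *ndev)
{
	struct my_ring *ring = netdev_priv(ndev);

	reclaim_completed(ring);
	/* Matches the barrier in my_start_xmit() */
	smp_mb();
	if (netif_queue_stopped(ndev) &&
	    ring_space(ring) >= MAX_SKB_FRAGS + 1)
		netif_wake_queue(ndev);
}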