mvpp2: Ensure driver works with netmap buffers in pools
The previous change for populating the buffer pools with netmap buffers
was incomplete in several ways:
* The native driver resets the buffer offset when mvpp2_open is called
  (e.g. when the device is brought admin up), because it initialises
  the rx queues again.
* Changing the MTU potentially switches between per-CPU and shared
  buffer pools, which interferes with the netmap use of the queues.
* Changing the MTU can also alter which pools the port's queues use,
  which would interfere with netmap's use of the pools.

To correct these:
* Hook the rxq_offset_set call from the rxq init and set the offset
  according to whether netmap is enabled (sketched below).
* Hook the MTU change function to avoid the troublesome interactions
  with the native driver, and refuse changes that would not fit in the
  netmap buffer size.
* Ensure the port rxqs are disabled before emptying them during netmap
  start and stop.
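
For reference, a condensed sketch of the first two hooks, mirroring the code
added in mvpp2_netmap.h below (the full versions there also handle MTU
alignment and carry the detailed comments); all identifiers are the driver's
and netmap's own:

static void mvpp2_netmap_rxq_offset_set(struct mvpp2_port *port, int prxq)
{
        /* Use the netmap slot headroom while netmap is on, otherwise the
         * native SKB headroom, so that re-running mvpp2_rxq_init() no
         * longer clobbers the netmap offset. */
        if (NA(port->dev) && nm_netmap_on(NA(port->dev)))
                mvpp2_rxq_offset_set(port, prxq, NETMAP_SLOT_HEADROOM);
        else
                mvpp2_rxq_offset_set(port, prxq, MVPP2_SKB_HEADROOM);
}

static int mvpp2_netmap_change_mtu(struct net_device *dev, int mtu)
{
        struct netmap_adapter *na = NA(dev);

        /* Refuse an MTU that would not fit in a netmap buffer and update
         * only dev->mtu, instead of letting the native driver reshuffle
         * the BM pools underneath netmap. */
        if (na && mtu > MVPP2_NETMAP_MAX_RX_PKT_SIZE(na))
                return -EINVAL;

        dev->mtu = mtu;
        return 0;
}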
Paul Davey authored and carlgsmith committed Nov 1, 2024
1 parent 606680f commit d8dedd8
Showing 2 changed files with 113 additions and 18 deletions.
59 changes: 42 additions & 17 deletions LINUX/final-patches/vanilla--mvpp2--40406--99999
@@ -1,7 +1,8 @@
diff -u a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
--- a/mvpp2/mvpp2_main.c 2023-05-24 16:36:55.000000000 +0000
+++ b/mvpp2/mvpp2_main.c 2023-08-28 01:50:47.764324982 +0000
@@ -43,6 +43,10 @@
diff --git a/mvpp2/mvpp2_main.c b/mvpp2/mvpp2_main.c
index d3f644cad57d..386664b767d7 100644
--- a/mvpp2/mvpp2_main.c
+++ b/mvpp2/mvpp2_main.c
@@ -44,6 +44,10 @@
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

@@ -12,7 +13,7 @@ diff -u a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet
enum mvpp2_bm_pool_log_num {
MVPP2_BM_SHORT,
MVPP2_BM_LONG,
@@ -688,7 +692,7 @@
@@ -688,7 +692,7 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
}

/* Allocate and initialize BM pools */
@@ -21,19 +22,31 @@ diff -u a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet
sizeof(*priv->bm_pools), GFP_KERNEL);
if (!priv->bm_pools)
return -ENOMEM;
@@ -1435,7 +1439,11 @@ static void mvpp2_interrupts_unmask(void *arg)

val = MVPP2_CAUSE_MISC_SUM_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
@@ -1461,7 +1465,11 @@ static void mvpp2_interrupts_unmask(void *arg)
val = MVPP2_CAUSE_MISC_SUM_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
+#ifdef CONFIG_NETMAP
+ if (port->has_tx_irqs && !nm_netmap_on(NA(port->dev)))
+#else
if (port->has_tx_irqs)
if (port->has_tx_irqs)
+#endif
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

mvpp2_thread_write(port->priv, thread,
@@ -2976,7 +2984,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
put_cpu();

/* Set Offset */
+#ifdef CONFIG_NETMAP
+ mvpp2_netmap_rxq_offset_set(port, rxq->id);
+#else
mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
+#endif
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

mvpp2_thread_write(port->priv, thread,
@@ -3340,6 +3348,11 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
/* Set coalescing pkts and time */
mvpp2_rx_pkts_coal_set(port, rxq);
@@ -3366,6 +3378,11 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
struct mvpp2_queue_vector *qv = dev_id;

@@ -45,7 +58,7 @@ diff -u a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet
mvpp2_qvec_interrupt_disable(qv);

napi_schedule(&qv->napi);
@@ -4353,6 +4366,12 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
@@ -4382,6 +4399,12 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
}
frags = skb_shinfo(skb)->nr_frags + 1;

@@ -58,7 +71,19 @@ diff -u a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet
/* Check number of available descriptors */
if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
@@ -7004,6 +7023,9 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
@@ -5831,7 +5854,11 @@ static const struct net_device_ops mvpp2_netdev_ops = {
.ndo_start_xmit = mvpp2_tx,
.ndo_set_rx_mode = mvpp2_set_rx_mode,
.ndo_set_mac_address = mvpp2_set_mac_address,
+#ifdef CONFIG_NETMAP
+ .ndo_change_mtu = mvpp2_netmap_change_mtu,
+#else
.ndo_change_mtu = mvpp2_change_mtu,
+#endif
.ndo_get_stats64 = mvpp2_get_stats64,
.ndo_eth_ioctl = mvpp2_ioctl,
.ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
@@ -7138,6 +7165,9 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
{
int i;

@@ -68,7 +93,7 @@ diff -u a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet
unregister_netdev(port->dev);
if (port->phylink)
phylink_destroy(port->phylink);
@@ -7609,6 +7631,14 @@ static int mvpp2_probe(struct platform_device *pdev)
@@ -7743,6 +7773,14 @@ static int mvpp2_probe(struct platform_device *pdev)
mvpp2_dbgfs_init(priv, pdev->name);

platform_set_drvdata(pdev, priv);
72 changes: 71 additions & 1 deletion LINUX/mvpp2_netmap.h
@@ -43,6 +43,8 @@ struct mvpp2_nm_adapter {
/* Extra netmap buffers required for each of the 4 netmap pools for each MVPP2 instance */
#define MVPP2_NETMAP_POOL_NUM_BUF 2048

#define MVPP2_NETMAP_MAX_RX_PKT_SIZE(na) (NETMAP_BUF_SIZE(na) - NETMAP_SLOT_HEADROOM - MVPP2_SKB_SHINFO_SIZE)

/* Used functions from below netmap header include */
static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, u32 offset);
static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset);
@@ -71,6 +73,8 @@ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int shor
static void mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset);
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq);
static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool, dma_addr_t *dma_addr, phys_addr_t *phys_addr);
static void mvpp2_ingress_enable(struct mvpp2_port *port);
static void mvpp2_ingress_disable(struct mvpp2_port *port);


/* Mask/unmask TX interrupts - we will process tx completion before sending new packets */
@@ -323,12 +327,50 @@ static int mvpp2_netmap_rxsync(struct netmap_kring *kring, int flags)
return 0;
}

static int mvpp2_netmap_change_mtu(struct net_device *dev, int mtu)
{
struct netmap_adapter *na = NA(dev);

/* The native driver's behaviour when setting the MTU shuffles queue
* pools in several circumstances and switches between per-CPU and
* shared pools. This is problematic for the zero-copy support in the
* netmap driver, which forces the native driver into shared pool mode
* and then changes the pools of the netmap-enabled interface; having
* other code change the pools associated with rx queues would break
* invariants expected by the netmap driver.
*
* Therefore only allow MTU changes that fit within the buffer size
* allotted by netmap, and don't alter any hardware settings here.
*/
if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
}

if (na && mtu > MVPP2_NETMAP_MAX_RX_PKT_SIZE(na)) {
netdev_err(dev, "Illegal MTU value %d (> %d) for netmap\n",
mtu, (int)MVPP2_NETMAP_MAX_RX_PKT_SIZE(na));
return -EINVAL;
}

/* Update the MTU field in the netdev only.
* It should be fine to leave the buffer size at the netmap size; this
* just means some packets larger than the MTU may be received.
* This also skips the flow control configuration that the native
* driver performs; that may need to be revisited.
*/
dev->mtu = mtu;

return 0;
}

static int mvpp2_netmap_users(struct mvpp2 *priv)
{
int count = 0;
int i;
for (i = 0; i < priv->port_count; i++) {
if (priv->port_list[i] && NA(priv->port_list[i]->dev))
if (priv->port_list[i] && NA(priv->port_list[i]->dev) && nm_netmap_on(NA(priv->port_list[i]->dev)))
count++;
}
return count;
@@ -404,11 +446,21 @@ static void mvpp2_netmap_destroy_pools(struct netmap_adapter *na)
BUG_ON(!priv->percpu_pools);
}

/* Set the correct rxq offset based on whether netmap is enabled or not */
static void mvpp2_netmap_rxq_offset_set(struct mvpp2_port *port, int prxq)
{
if (NA(port->dev) && nm_netmap_on(NA(port->dev)))
mvpp2_rxq_offset_set(port, prxq, NETMAP_SLOT_HEADROOM);
else
mvpp2_rxq_offset_set(port, prxq, MVPP2_SKB_HEADROOM);
}

static void mvpp2_netmap_start(struct netmap_adapter *na)
{
struct mvpp2_port *port = netdev_priv(na->ifp);
struct mvpp2 *priv = port->priv;
int i;
bool running = netif_running(port->dev);

rtnl_lock();

@@ -417,6 +469,10 @@ static void mvpp2_netmap_start(struct netmap_adapter *na)
mvpp2_netmap_create_pools (na, MVPP2_NETMAP_POOL_NUM_BUF);
}

/* Disable ingress while emptying rings */
if (running)
mvpp2_ingress_disable(port);

/* Use netmap BM pools for this port */
for (i = 0; i < port->nrxqs; i++) {
mvpp2_rxq_drop_pkts(port, port->rxqs[i]);
@@ -425,6 +481,11 @@ static void mvpp2_netmap_start(struct netmap_adapter *na)
mvpp2_rxq_offset_set(port, i, NETMAP_SLOT_HEADROOM);
}

/* Re-enable ingress if interface was running */
if (running)
mvpp2_ingress_enable(port);


rtnl_unlock();
}

@@ -434,9 +495,14 @@ static void mvpp2_netmap_stop(struct netmap_adapter *na)
struct mvpp2_port *port = netdev_priv(na->ifp);
struct mvpp2 *priv = port->priv;
int i;
bool running = netif_running(port->dev);

rtnl_lock();

/* Disable ingress while emptying rings */
if (running)
mvpp2_ingress_disable(port);

/* Use native BM pools for this port */
for (i = 0; i < port->nrxqs; i++) {
mvpp2_rxq_drop_pkts(port, port->rxqs[i]);
@@ -445,6 +511,10 @@ static void mvpp2_netmap_stop(struct netmap_adapter *na)
mvpp2_rxq_offset_set(port, i, MVPP2_SKB_HEADROOM);
}

/* Re-enable ingress if interface was running */
if (running)
mvpp2_ingress_enable(port);

/* Free all netmap buffers and pools if no longer needed */
if (mvpp2_netmap_users(priv) <= 1) {
mvpp2_netmap_destroy_pools (na);
