From 8aa9f6bd71bcfd15e953a0932ed21953ab6d6bbf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?=
Date: Mon, 21 Nov 2016 11:12:09 +0100
Subject: mvebu: Add BQL patch for mvneta driver.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This adds the patch submitted upstream that adds BQL support to the
mvneta driver: https://patchwork.kernel.org/patch/9328413/. This helps
latency under load when the physical link is saturated.

Signed-off-by: Toke Høiland-Jørgensen
---
 .../147-net-mvneta-add-BQL-support.patch           | 83 ++++++++++++++++++++++
 1 file changed, 83 insertions(+)
 create mode 100644 target/linux/mvebu/patches-4.4/147-net-mvneta-add-BQL-support.patch

diff --git a/target/linux/mvebu/patches-4.4/147-net-mvneta-add-BQL-support.patch b/target/linux/mvebu/patches-4.4/147-net-mvneta-add-BQL-support.patch
new file mode 100644
index 0000000000..7bd2593537
--- /dev/null
+++ b/target/linux/mvebu/patches-4.4/147-net-mvneta-add-BQL-support.patch
@@ -0,0 +1,83 @@
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -1695,8 +1695,10 @@ static struct mvneta_tx_queue *mvneta_tx
+ 
+ /* Free tx queue skbuffs */
+ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
+-				 struct mvneta_tx_queue *txq, int num)
++				 struct mvneta_tx_queue *txq, int num,
++				 struct netdev_queue *nq)
+ {
++	unsigned int bytes_compl = 0, pkts_compl = 0;
+ 	int i;
+ 
+ 	for (i = 0; i < num; i++) {
+@@ -1704,6 +1706,11 @@ static void mvneta_txq_bufs_free(struct
+ 			txq->txq_get_index;
+ 		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+ 
++		if (skb) {
++			bytes_compl += skb->len;
++			pkts_compl++;
++		}
++
+ 		mvneta_txq_inc_get(txq);
+ 
+ 		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+@@ -1714,6 +1721,8 @@ static void mvneta_txq_bufs_free(struct
+ 			continue;
+ 		dev_kfree_skb_any(skb);
+ 	}
++
++	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
+ }
+ 
+ /* Handle end of transmission */
+@@ -1727,7 +1736,7 @@ static void mvneta_txq_done(struct mvnet
+ 	if (!tx_done)
+ 		return;
+ 
+-	mvneta_txq_bufs_free(pp, txq, tx_done);
++	mvneta_txq_bufs_free(pp, txq, tx_done, nq);
+ 
+ 	txq->count -= tx_done;
+ 
+@@ -2334,6 +2343,8 @@ out:
+ 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+ 
++		netdev_tx_sent_queue(nq, len);
++
+ 		txq->count += frags;
+ 		mvneta_txq_pend_desc_add(pp, txq, frags);
+ 
+@@ -2358,9 +2369,10 @@ static void mvneta_txq_done_force(struct
+ 				  struct mvneta_tx_queue *txq)
+ 
+ {
++	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+ 	int tx_done = txq->count;
+ 
+-	mvneta_txq_bufs_free(pp, txq, tx_done);
++	mvneta_txq_bufs_free(pp, txq, tx_done, nq);
+ 
+ 	/* reset txq */
+ 	txq->count = 0;
+@@ -2841,6 +2853,8 @@ static int mvneta_txq_init(struct mvneta
+ static void mvneta_txq_deinit(struct mvneta_port *pp,
+ 			      struct mvneta_tx_queue *txq)
+ {
++	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
++
+ 	kfree(txq->tx_skb);
+ 
+ 	if (txq->tso_hdrs)
+@@ -2852,6 +2866,8 @@ static void mvneta_txq_deinit(struct mvn
+ 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ 				  txq->descs, txq->descs_phys);
+ 
++	netdev_tx_reset_queue(nq);
++
+ 	txq->descs = NULL;
+ 	txq->last_desc = 0;
+ 	txq->next_desc_to_proc = 0;
--
cgit v1.2.3
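
Note on the BQL pattern the patch wires into mvneta (a minimal sketch, not part of
the patch): the driver reports to the stack how many bytes it queued to the
hardware, how many it completed, and resets that accounting when a TX queue is
torn down, so the stack can limit in-flight bytes per queue. The my_dev_*
functions below are hypothetical stand-ins for the mvneta hooks touched above;
only netdev_get_tx_queue(), netdev_tx_sent_queue(), netdev_tx_completed_queue()
and netdev_tx_reset_queue() are real kernel APIs.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* On transmit: account the bytes handed to the hardware queue. */
	static netdev_tx_t my_dev_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *nq =
			netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		/* ... fill and kick TX descriptors for skb here ... */
		netdev_tx_sent_queue(nq, skb->len);
		return NETDEV_TX_OK;
	}

	/* On TX completion: report how many packets/bytes the hardware finished. */
	static void my_dev_tx_done(struct net_device *dev, int qid,
				   unsigned int pkts, unsigned int bytes)
	{
		struct netdev_queue *nq = netdev_get_tx_queue(dev, qid);

		netdev_tx_completed_queue(nq, pkts, bytes);
	}

	/* On queue teardown: drop any outstanding BQL accounting. */
	static void my_dev_txq_deinit(struct net_device *dev, int qid)
	{
		netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
	}

This mirrors the hunks above: the mvneta transmit path calls
netdev_tx_sent_queue(), mvneta_txq_bufs_free() calls
netdev_tx_completed_queue(), and mvneta_txq_deinit() calls
netdev_tx_reset_queue().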