author    Tomasz Maciej Nowak <tmn505@gmail.com>    2020-11-23 14:59:45 +0100
committer Adrian Schmutzler <freifunk@adrianschmutzler.de>    2020-11-25 13:57:50 +0100
commit    92b3efec54b36797614650c830a57c4f8786b5c9 (patch)
tree      4ed06c90bde035a38ecd6d9a79004717ec3ee5ce /target/linux/mvebu/patches-5.4/011-net-mvneta-introduce-page-pool-API-for-sw-buffer-man.patch
parent    bffb30603cca586ee2a5713748ed1f6e76d6754b (diff)
mvebu: sort patches
Sort patches according to target/linux/generic/PATCHES. Additionally:

- replace hashes in backported patches with the ones from main Linux tree
- add descriptions to some patches

Signed-off-by: Tomasz Maciej Nowak <tmn505@gmail.com>
[remove 004-add_sata_disk_activity_trigger.patch separately]
Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de>
Diffstat (limited to 'target/linux/mvebu/patches-5.4/011-net-mvneta-introduce-page-pool-API-for-sw-buffer-man.patch')
-rw-r--r--    target/linux/mvebu/patches-5.4/011-net-mvneta-introduce-page-pool-API-for-sw-buffer-man.patch    181
1 file changed, 0 insertions, 181 deletions
diff --git a/target/linux/mvebu/patches-5.4/011-net-mvneta-introduce-page-pool-API-for-sw-buffer-man.patch b/target/linux/mvebu/patches-5.4/011-net-mvneta-introduce-page-pool-API-for-sw-buffer-man.patch
deleted file mode 100644
index dbed6df873..0000000000
--- a/target/linux/mvebu/patches-5.4/011-net-mvneta-introduce-page-pool-API-for-sw-buffer-man.patch
+++ /dev/null
@@ -1,181 +0,0 @@
-From 160f006a6fe904177cbca867c48dfb6d27262dd5 Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <lorenzo@kernel.org>
-Date: Sat, 19 Oct 2019 10:13:22 +0200
-Subject: [PATCH 2/7] net: mvneta: introduce page pool API for sw buffer
- manager
-
-Use the page_pool api for allocations and DMA handling instead of
-__dev_alloc_page()/dma_map_page() and free_page()/dma_unmap_page().
-Pages are unmapped using page_pool_release_page before packets
-go into the network stack.
-
-The page_pool API offers buffer recycling capabilities for XDP but
-allocates one page per packet, unless the driver splits and manages
-the allocated page.
-This is a preliminary patch to add XDP support to mvneta driver
-
-Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
-Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
-Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/ethernet/marvell/Kconfig | 1 +
- drivers/net/ethernet/marvell/mvneta.c | 83 +++++++++++++++++++++------
- 2 files changed, 65 insertions(+), 19 deletions(-)
-
---- a/drivers/net/ethernet/marvell/Kconfig
-+++ b/drivers/net/ethernet/marvell/Kconfig
-@@ -61,6 +61,7 @@ config MVNETA
- depends on ARCH_MVEBU || COMPILE_TEST
- select MVMDIO
- select PHYLINK
-+ select PAGE_POOL
- ---help---
- This driver supports the network interface units in the
- Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
---- a/drivers/net/ethernet/marvell/mvneta.c
-+++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -37,6 +37,7 @@
- #include <net/ip.h>
- #include <net/ipv6.h>
- #include <net/tso.h>
-+#include <net/page_pool.h>
-
- /* Registers */
- #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
-@@ -607,6 +608,10 @@ struct mvneta_rx_queue {
- u32 pkts_coal;
- u32 time_coal;
-
-+ /* page_pool */
-+ struct page_pool *page_pool;
-+ struct xdp_rxq_info xdp_rxq;
-+
- /* Virtual address of the RX buffer */
- void **buf_virt_addr;
-
-@@ -1825,23 +1830,21 @@ static int mvneta_rx_refill(struct mvnet
- struct mvneta_rx_queue *rxq,
- gfp_t gfp_mask)
- {
-+ enum dma_data_direction dma_dir;
- dma_addr_t phys_addr;
- struct page *page;
-
-- page = __dev_alloc_page(gfp_mask);
-+ page = page_pool_alloc_pages(rxq->page_pool,
-+ gfp_mask | __GFP_NOWARN);
- if (!page)
- return -ENOMEM;
-
-- /* map page for use */
-- phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
-- DMA_FROM_DEVICE);
-- if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
-- __free_page(page);
-- return -ENOMEM;
-- }
--
-- phys_addr += pp->rx_offset_correction;
-+ phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
-+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
-+ dma_sync_single_for_device(pp->dev->dev.parent, phys_addr,
-+ PAGE_SIZE, dma_dir);
- mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
-+
- return 0;
- }
-
-@@ -1907,10 +1910,12 @@ static void mvneta_rxq_drop_pkts(struct
- if (!data || !(rx_desc->buf_phys_addr))
- continue;
-
-- dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-- PAGE_SIZE, DMA_FROM_DEVICE);
-- __free_page(data);
-+ page_pool_put_page(rxq->page_pool, data, false);
- }
-+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
-+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
-+ page_pool_destroy(rxq->page_pool);
-+ rxq->page_pool = NULL;
- }
-
- static void
-@@ -2047,8 +2052,7 @@ static int mvneta_rx_swbm(struct napi_st
- skb_add_rx_frag(rxq->skb, frag_num, page,
- frag_offset, frag_size,
- PAGE_SIZE);
-- dma_unmap_page(dev->dev.parent, phys_addr,
-- PAGE_SIZE, DMA_FROM_DEVICE);
-+ page_pool_release_page(rxq->page_pool, page);
- rxq->left_size -= frag_size;
- }
- } else {
-@@ -2078,9 +2082,7 @@ static int mvneta_rx_swbm(struct napi_st
- frag_offset, frag_size,
- PAGE_SIZE);
-
-- dma_unmap_page(dev->dev.parent, phys_addr,
-- PAGE_SIZE, DMA_FROM_DEVICE);
--
-+ page_pool_release_page(rxq->page_pool, page);
- rxq->left_size -= frag_size;
- }
- } /* Middle or Last descriptor */
-@@ -2847,11 +2849,54 @@ static int mvneta_poll(struct napi_struc
- return rx_done;
- }
-
-+static int mvneta_create_page_pool(struct mvneta_port *pp,
-+ struct mvneta_rx_queue *rxq, int size)
-+{
-+ struct page_pool_params pp_params = {
-+ .order = 0,
-+ .flags = PP_FLAG_DMA_MAP,
-+ .pool_size = size,
-+ .nid = cpu_to_node(0),
-+ .dev = pp->dev->dev.parent,
-+ .dma_dir = DMA_FROM_DEVICE,
-+ };
-+ int err;
-+
-+ rxq->page_pool = page_pool_create(&pp_params);
-+ if (IS_ERR(rxq->page_pool)) {
-+ err = PTR_ERR(rxq->page_pool);
-+ rxq->page_pool = NULL;
-+ return err;
-+ }
-+
-+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
-+ if (err < 0)
-+ goto err_free_pp;
-+
-+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
-+ rxq->page_pool);
-+ if (err)
-+ goto err_unregister_rxq;
-+
-+ return 0;
-+
-+err_unregister_rxq:
-+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
-+err_free_pp:
-+ page_pool_destroy(rxq->page_pool);
-+ rxq->page_pool = NULL;
-+ return err;
-+}
-+
- /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
- static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
- int num)
- {
-- int i;
-+ int i, err;
-+
-+ err = mvneta_create_page_pool(pp, rxq, num);
-+ if (err < 0)
-+ return err;
-
- for (i = 0; i < num; i++) {
- memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
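For reference, the removed patch follows the usual page_pool conversion pattern: create a per-RX-queue pool that owns the DMA mapping (PP_FLAG_DMA_MAP), allocate possibly-recycled pages from it on refill, and sync the buffer for the device before handing it to hardware. The sketch below condenses that pattern from the hunks above. It is illustrative only: the mvneta descriptor handling, the rx_offset_correction adjustment, and the error paths are elided, and rx_pool_create()/rx_refill_one() are hypothetical helper names, not functions from the driver.

	#include <net/page_pool.h>

	/* Create a pool sized to the RX ring. PP_FLAG_DMA_MAP makes the
	 * pool perform dma_map_page() itself, so the driver no longer
	 * has to map or unmap pages by hand.
	 */
	static struct page_pool *rx_pool_create(struct device *dev, int size)
	{
		struct page_pool_params pp_params = {
			.order = 0,			/* one page per buffer */
			.flags = PP_FLAG_DMA_MAP,	/* pool owns the DMA mapping */
			.pool_size = size,		/* one entry per RX descriptor */
			.nid = cpu_to_node(0),
			.dev = dev,
			.dma_dir = DMA_FROM_DEVICE,
		};

		return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
	}

	/* Refill one RX slot: replaces __dev_alloc_page() + dma_map_page(). */
	static int rx_refill_one(struct device *dev, struct page_pool *pool,
				 dma_addr_t *phys_addr)
	{
		enum dma_data_direction dma_dir;
		struct page *page;

		/* May return a recycled page that is already DMA-mapped. */
		page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			return -ENOMEM;

		/* Ask the pool for the mapping instead of mapping here. */
		*phys_addr = page_pool_get_dma_addr(page);
		dma_dir = page_pool_get_dma_dir(pool);
		dma_sync_single_for_device(dev, *phys_addr, PAGE_SIZE, dma_dir);

		/* ... write *phys_addr into the hardware RX descriptor ... */
		return 0;
	}

On the RX path, pages handed to the network stack are unmapped with page_pool_release_page(), and dropped buffers are returned to the pool with page_pool_put_page(), exactly as the hunks above show.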