From 26d3cc476c26832e1e05db182ac27906f6c81f2d Mon Sep 17 00:00:00 2001
From: Camelia Groza
Date: Tue, 29 Oct 2019 16:12:18 +0200
Subject: [PATCH] sdk_dpaa: ls1043a errata: memory related fixes

Avoid a crash by verifying the allocation return status.

Use the standard API for determining the page order needed for
allocating Jumbo sized skbs.

Explicitly remove the old skb outside the w/a, for both successful and
unsuccessful realignments. Make sure the old skb's memory isn't leaked.

Signed-off-by: Camelia Groza
---
 .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 30 ++++++++++++++
 1 file changed, 19 insertions(+), 11 deletions(-)

--- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
@@ -809,8 +809,8 @@ static struct sk_buff *a010022_realign_s
 {
 	int trans_offset = skb_transport_offset(skb);
 	int net_offset = skb_network_offset(skb);
-	int nsize, headroom, npage_order;
 	struct sk_buff *nskb = NULL;
+	int nsize, headroom;
 	struct page *npage;
 	void *npage_addr;
 
@@ -825,8 +825,7 @@ static struct sk_buff *a010022_realign_s
 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	/* Reserve enough memory to accommodate Jumbo frames */
-	npage_order = (nsize - 1) / PAGE_SIZE;
-	npage = alloc_pages(GFP_ATOMIC | __GFP_COMP, npage_order);
+	npage = alloc_pages(GFP_ATOMIC | __GFP_COMP, get_order(nsize));
 	if (unlikely(!npage)) {
 		WARN_ONCE(1, "Memory allocation failure\n");
 		return NULL;
 	}
@@ -869,7 +868,6 @@ static struct sk_buff *a010022_realign_s
 	/* We don't want the buffer to be recycled so we mark it accordingly */
 	nskb->mark = NONREC_MARK;
 
-	dev_kfree_skb(skb);
 	return nskb;
 
 err:
@@ -911,8 +909,13 @@ int __hot skb_to_sg_fd(struct dpa_priv_s
 	 * is in place and we need to avoid crossing a 4k boundary.
 	 */
 #ifndef CONFIG_PPC
-	if (unlikely(dpaa_errata_a010022))
-		sgt_buf = page_address(alloc_page(GFP_ATOMIC));
+	if (unlikely(dpaa_errata_a010022)) {
+		struct page *new_page = alloc_page(GFP_ATOMIC);
+
+		if (unlikely(!new_page))
+			return -ENOMEM;
+		sgt_buf = page_address(new_page);
+	} else
 #endif
 		sgt_buf = netdev_alloc_frag(priv->tx_headroom + sgt_size);
 
@@ -1061,6 +1064,7 @@ int __hot dpa_tx_extended(struct sk_buff
 	int err = 0;
 	bool nonlinear;
 	int *countptr, offset = 0;
+	struct sk_buff *nskb;
 
 	priv = netdev_priv(net_dev);
 	/* Non-migratable context, safe to use raw_cpu_ptr */
@@ -1072,9 +1076,11 @@ int __hot dpa_tx_extended(struct sk_buff
 
 #ifndef CONFIG_PPC
 	if (unlikely(dpaa_errata_a010022) && a010022_check_skb(skb, priv)) {
-		skb = a010022_realign_skb(skb, priv);
-		if (!skb)
+		nskb = a010022_realign_skb(skb, priv);
+		if (!nskb)
 			goto skb_to_fd_failed;
+		dev_kfree_skb(skb);
+		skb = nskb;
 	}
 #endif
 
@@ -1130,15 +1136,17 @@ int __hot dpa_tx_extended(struct sk_buff
 
 	/* Code borrowed from skb_unshare(). */
 	if (skb_cloned(skb)) {
-		struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+		nskb = skb_copy(skb, GFP_ATOMIC);
 		kfree_skb(skb);
 		skb = nskb;
 
 #ifndef CONFIG_PPC
 		if (unlikely(dpaa_errata_a010022) && a010022_check_skb(skb, priv)) {
-			skb = a010022_realign_skb(skb, priv);
-			if (!skb)
+			nskb = a010022_realign_skb(skb, priv);
+			if (!nskb)
 				goto skb_to_fd_failed;
+			dev_kfree_skb(skb);
+			skb = nskb;
 		}
 #endif
 		/* skb_copy() has now linearized the skbuff. */
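
For illustration only, not part of the patch: the removed open-coded computation
treats a page count as an allocation order, while get_order() returns the smallest
order whose allocation covers the requested size. The userspace sketch below
assumes a 4 KiB PAGE_SIZE and uses a hypothetical helper, order_for_size(), as a
local stand-in for the kernel's get_order(); the nsize values are arbitrary
examples, not taken from the driver.

/* Illustrative userspace sketch, not driver code; assumes 4 KiB pages. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for get_order(): smallest order such that (PAGE_SIZE << order) >= size */
static int order_for_size(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	/* Example sizes roughly spanning standard and Jumbo frame skbs */
	unsigned long sizes[] = { 2048, 4096, 9600, 16384, 20000 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long nsize = sizes[i];
		int old_order = (nsize - 1) / PAGE_SIZE; /* removed open-coded formula */
		int new_order = order_for_size(nsize);   /* get_order()-style result */

		printf("nsize=%5lu  old order=%d (%lu KiB)  get_order=%d (%lu KiB)\n",
		       nsize, old_order, (PAGE_SIZE << old_order) / 1024,
		       new_order, (PAGE_SIZE << new_order) / 1024);
	}
	return 0;
}

With a 4 KiB page, nsize = 16384 gives order 3 (32 KiB) from the removed formula
but order 2 (16 KiB) from the get_order()-style computation, which is why the
standard helper avoids over-allocating for Jumbo sized skbs.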