From 26d3cc476c26832e1e05db182ac27906f6c81f2d Mon Sep 17 00:00:00 2001
From: Camelia Groza <camelia.groza@nxp.com>
Date: Tue, 29 Oct 2019 16:12:18 +0200
Subject: [PATCH] sdk_dpaa: ls1043a errata: memory related fixes

Avoid a crash by verifying the allocation return status.

Use the standard API for determining the page order needed for
allocating Jumbo sized skbs.

Explicitly remove the old skb outside the w/a, for both successful and
unsuccessful realignments. Make sure the old skb's memory isn't leaked.

Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
---
 .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 30 ++++++++++++++--------
 1 file changed, 19 insertions(+), 11 deletions(-)

--- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
@@ -809,8 +809,8 @@ static struct sk_buff *a010022_realign_s
 {
 	int trans_offset = skb_transport_offset(skb);
 	int net_offset = skb_network_offset(skb);
-	int nsize, headroom, npage_order;
 	struct sk_buff *nskb = NULL;
+	int nsize, headroom;
 	struct page *npage;
 	void *npage_addr;
 
@@ -825,8 +825,7 @@ static struct sk_buff *a010022_realign_s
 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	/* Reserve enough memory to accommodate Jumbo frames */
-	npage_order = (nsize - 1) / PAGE_SIZE;
-	npage = alloc_pages(GFP_ATOMIC | __GFP_COMP, npage_order);
+	npage = alloc_pages(GFP_ATOMIC | __GFP_COMP, get_order(nsize));
 	if (unlikely(!npage)) {
 		WARN_ONCE(1, "Memory allocation failure\n");
 		return NULL;
@@ -869,7 +868,6 @@ static struct sk_buff *a010022_realign_s
 	/* We don't want the buffer to be recycled so we mark it accordingly */
 	nskb->mark = NONREC_MARK;
 
-	dev_kfree_skb(skb);
 	return nskb;
 
 err:
@@ -911,8 +909,13 @@ int __hot skb_to_sg_fd(struct dpa_priv_s
 	 * is in place and we need to avoid crossing a 4k boundary.
 	 */
 #ifndef CONFIG_PPC
-	if (unlikely(dpaa_errata_a010022))
-		sgt_buf = page_address(alloc_page(GFP_ATOMIC));
+	if (unlikely(dpaa_errata_a010022)) {
+		struct page *new_page = alloc_page(GFP_ATOMIC);
+
+		if (unlikely(!new_page))
+			return -ENOMEM;
+		sgt_buf = page_address(new_page);
+	}
 	else
 #endif
 		sgt_buf = netdev_alloc_frag(priv->tx_headroom + sgt_size);
@@ -1061,6 +1064,7 @@ int __hot dpa_tx_extended(struct sk_buff
 	int err = 0;
 	bool nonlinear;
 	int *countptr, offset = 0;
+	struct sk_buff *nskb;
 
 	priv = netdev_priv(net_dev);
 	/* Non-migratable context, safe to use raw_cpu_ptr */
@@ -1072,9 +1076,11 @@ int __hot dpa_tx_extended(struct sk_buff
 
 #ifndef CONFIG_PPC
 	if (unlikely(dpaa_errata_a010022) && a010022_check_skb(skb, priv)) {
-		skb = a010022_realign_skb(skb, priv);
-		if (!skb)
+		nskb = a010022_realign_skb(skb, priv);
+		if (!nskb)
 			goto skb_to_fd_failed;
+		dev_kfree_skb(skb);
+		skb = nskb;
 	}
 #endif
 
@@ -1130,15 +1136,17 @@ int __hot dpa_tx_extended(struct sk_buff
 	/* Code borrowed from skb_unshare(). */
 	if (skb_cloned(skb)) {
-		struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+		nskb = skb_copy(skb, GFP_ATOMIC);
 
 		kfree_skb(skb);
 		skb = nskb;
 #ifndef CONFIG_PPC
 		if (unlikely(dpaa_errata_a010022) &&
 		    a010022_check_skb(skb, priv)) {
-			skb = a010022_realign_skb(skb, priv);
-			if (!skb)
+			nskb = a010022_realign_skb(skb, priv);
+			if (!nskb)
 				goto skb_to_fd_failed;
+			dev_kfree_skb(skb);
+			skb = nskb;
 		}
 #endif
 		/* skb_copy() has now linearized the skbuff. */