author: Felix Fietkau <nbd@openwrt.org>  2013-09-11 14:30:15 +0000
committer: Felix Fietkau <nbd@openwrt.org>  2013-09-11 14:30:15 +0000
commit: 8eb57d0cd7028967debfb8c24cd91589fb18e4de (patch)
tree: edb8885c9b990c30d93fe390e4144b245f9abb21 /target
parent: 12762698cf7106800ad3446b494429621738a6da (diff)
kernel: align the skb padding to a power of two
The skb usually starts with padding, which allows the protocols in the network stack to add their headers in front of the payload. The skb has to be reallocated when the preallocated padding is not large enough. This can happen, for example, in the function __skb_cow, which checks the requested extra headroom and allocates a larger buffer when the requested headroom is bigger than the available one. The size of the extra buffer is rounded up to a multiple of NET_SKB_PAD for the target architecture.

The macro used to round up to a multiple of NET_SKB_PAD only works correctly when the alignment parameter is a power of two. The currently used value of 48 bytes cannot be written as 2 ** n, only as the sum 2 ** 4 + 2 ** 5. The extra buffer is therefore not always a multiple of 48 but can be 16, 64, 80, 128, 144 and so on. The generated values are not even monotonic: 48 requested bytes are mapped to 80 allocated bytes, while 49 requested bytes are mapped to only 64 allocated bytes. These unexpectedly small values result in more reallocations of the buffer.

This was noticed prominently during tests between two QCA9558 720 MHz devices which were connected via ethernet to PCs and had an HT40 802.11n 3x3 link between each other. The PC-to-PC throughput during iperf TCP runs increased reliably from 186 Mibit/s to 214 Mibit/s in one direction and from 195 Mibit/s to 220 Mibit/s in the other direction. This is a performance increase of about 14% just from reducing the number of reallocations.

Signed-off-by: Sven Eckelmann <sven@open-mesh.com>

SVN-Revision: 37948
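As an illustration (not part of the original commit message), the following minimal user-space sketch of the kernel's ALIGN() round-up macro from include/linux/kernel.h reproduces the non-monotonic mapping described above; the values 48 and 64 stand in for NET_SKB_PAD:

#include <stdio.h>

/* Same round-up trick as the kernel's ALIGN() macro. It assumes that
 * a is a power of two, because ~(a - 1) must be a mask of contiguous
 * high bits for the AND to round down cleanly. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* a = 48 is not a power of two: ~(48 - 1) clears bit 5 (the 32s
	 * bit) while keeping bit 4, so results can lose 32 bytes and the
	 * mapping is not monotonic. */
	printf("ALIGN(48, 48) = %d\n", ALIGN(48, 48)); /* 80 */
	printf("ALIGN(49, 48) = %d\n", ALIGN(49, 48)); /* 64, smaller than for 48 */

	/* a = 64 is a power of two and rounds up as intended. */
	printf("ALIGN(48, 64) = %d\n", ALIGN(48, 64)); /* 64 */
	printf("ALIGN(49, 64) = %d\n", ALIGN(49, 64)); /* 64 */
	printf("ALIGN(65, 64) = %d\n", ALIGN(65, 64)); /* 128 */
	return 0;
}

Bumping NET_SKB_PAD from 48 to 64 thus satisfies the power-of-two assumption of the macro and never hands out less headroom than the 48 bytes the old value was meant to guarantee.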
Diffstat (limited to 'target')
-rw-r--r--  target/linux/generic/patches-3.10/655-increase_skb_pad.patch  2
-rw-r--r--  target/linux/generic/patches-3.3/655-increase_skb_pad.patch   2
-rw-r--r--  target/linux/generic/patches-3.6/655-increase_skb_pad.patch   2
-rw-r--r--  target/linux/generic/patches-3.8/655-increase_skb_pad.patch   2
-rw-r--r--  target/linux/generic/patches-3.9/655-increase_skb_pad.patch   2
5 files changed, 5 insertions, 5 deletions
diff --git a/target/linux/generic/patches-3.10/655-increase_skb_pad.patch b/target/linux/generic/patches-3.10/655-increase_skb_pad.patch
index b1a0fb71f8..6e10a1ace4 100644
--- a/target/linux/generic/patches-3.10/655-increase_skb_pad.patch
+++ b/target/linux/generic/patches-3.10/655-increase_skb_pad.patch
@@ -5,7 +5,7 @@
   */
  #ifndef NET_SKB_PAD
  -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
 -+#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
 ++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
  #endif
  
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/patches-3.3/655-increase_skb_pad.patch b/target/linux/generic/patches-3.3/655-increase_skb_pad.patch
index 5d14daadfa..c4c8c3416e 100644
--- a/target/linux/generic/patches-3.3/655-increase_skb_pad.patch
+++ b/target/linux/generic/patches-3.3/655-increase_skb_pad.patch
@@ -5,7 +5,7 @@
   */
  #ifndef NET_SKB_PAD
  -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
 -+#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
 ++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
  #endif
  
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/patches-3.6/655-increase_skb_pad.patch b/target/linux/generic/patches-3.6/655-increase_skb_pad.patch
index c67a6a4797..0f856a6cc0 100644
--- a/target/linux/generic/patches-3.6/655-increase_skb_pad.patch
+++ b/target/linux/generic/patches-3.6/655-increase_skb_pad.patch
@@ -5,7 +5,7 @@
   */
  #ifndef NET_SKB_PAD
  -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
 -+#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
 ++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
  #endif
  
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/patches-3.8/655-increase_skb_pad.patch b/target/linux/generic/patches-3.8/655-increase_skb_pad.patch
index 6150d5237d..9f0b3dc931 100644
--- a/target/linux/generic/patches-3.8/655-increase_skb_pad.patch
+++ b/target/linux/generic/patches-3.8/655-increase_skb_pad.patch
@@ -5,7 +5,7 @@
   */
  #ifndef NET_SKB_PAD
  -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
 -+#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
 ++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
  #endif
  
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/patches-3.9/655-increase_skb_pad.patch b/target/linux/generic/patches-3.9/655-increase_skb_pad.patch
index 6d0d3df841..9f669fa8bd 100644
--- a/target/linux/generic/patches-3.9/655-increase_skb_pad.patch
+++ b/target/linux/generic/patches-3.9/655-increase_skb_pad.patch
@@ -5,7 +5,7 @@
   */
  #ifndef NET_SKB_PAD
  -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
 -+#define NET_SKB_PAD max(48, L1_CACHE_BYTES)
 ++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
  #endif
  
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);