author    Felix Fietkau <nbd@nbd.name>	2020-10-24 21:14:16 +0200
committer Felix Fietkau <nbd@nbd.name>	2021-02-16 20:06:51 +0100
commit    b10d6044599d8c1fa7fbb2374bcbf30118d39db1 (patch)
tree      3fc84f92c14dfa7178487b71e1b8d63c2b688439 /target/linux/generic/pending-5.10
parent    299b8554183791b325e393c880d32360d7d72f73 (diff)
kernel: add linux 5.10 support
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'target/linux/generic/pending-5.10')
-rw-r--r-- target/linux/generic/pending-5.10/100-compiler.h-only-include-asm-rwonce.h-for-kernel-code.patch | 29
-rw-r--r-- target/linux/generic/pending-5.10/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch | 57
-rw-r--r-- target/linux/generic/pending-5.10/110-ehci_hcd_ignore_oc.patch | 79
-rw-r--r-- target/linux/generic/pending-5.10/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch | 82
-rw-r--r-- target/linux/generic/pending-5.10/130-add-linux-spidev-compatible-si3210.patch | 18
-rw-r--r-- target/linux/generic/pending-5.10/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch | 78
-rw-r--r-- target/linux/generic/pending-5.10/141-jffs2-add-RENAME_EXCHANGE-support.patch | 73
-rw-r--r-- target/linux/generic/pending-5.10/142-jffs2-add-splice-ops.patch | 20
-rw-r--r-- target/linux/generic/pending-5.10/150-bridge_allow_receiption_on_disabled_port.patch | 45
-rw-r--r-- target/linux/generic/pending-5.10/190-rtc-rs5c372-support_alarms_up_to_1_week.patch | 94
-rw-r--r-- target/linux/generic/pending-5.10/191-rtc-rs5c372-let_the_alarm_to_be_used_as_wakeup_source.patch | 70
-rw-r--r-- target/linux/generic/pending-5.10/201-extra_optimization.patch | 31
-rw-r--r-- target/linux/generic/pending-5.10/203-kallsyms_uncompressed.patch | 119
-rw-r--r-- target/linux/generic/pending-5.10/205-backtrace_module_info.patch | 41
-rw-r--r-- target/linux/generic/pending-5.10/240-remove-unsane-filenames-from-deps_initramfs-list.patch | 30
-rw-r--r-- target/linux/generic/pending-5.10/261-enable_wilink_platform_without_drivers.patch | 20
-rw-r--r-- target/linux/generic/pending-5.10/270-platform-mikrotik-build-bits.patch | 31
-rw-r--r-- target/linux/generic/pending-5.10/300-mips_expose_boot_raw.patch | 40
-rw-r--r-- target/linux/generic/pending-5.10/302-mips_no_branch_likely.patch | 22
-rw-r--r-- target/linux/generic/pending-5.10/305-mips_module_reloc.patch | 371
-rw-r--r-- target/linux/generic/pending-5.10/307-mips_highmem_offset.patch | 19
-rw-r--r-- target/linux/generic/pending-5.10/308-mips32r2_tune.patch | 22
-rw-r--r-- target/linux/generic/pending-5.10/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch | 140
-rw-r--r-- target/linux/generic/pending-5.10/310-arm_module_unresolved_weak_sym.patch | 22
-rw-r--r-- target/linux/generic/pending-5.10/311-MIPS-zboot-put-appended-dtb-into-a-section.patch | 36
-rw-r--r-- target/linux/generic/pending-5.10/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch | 281
-rw-r--r-- target/linux/generic/pending-5.10/332-arc-add-OWRTDTB-section.patch | 84
-rw-r--r-- target/linux/generic/pending-5.10/333-arc-enable-unaligned-access-in-kernel-mode.patch | 24
-rw-r--r-- target/linux/generic/pending-5.10/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch | 25
-rw-r--r-- target/linux/generic/pending-5.10/400-mtd-mtdsplit-support.patch | 313
-rw-r--r-- target/linux/generic/pending-5.10/419-mtd-redboot-add-of_match_table-with-DT-binding.patch | 22
-rw-r--r-- target/linux/generic/pending-5.10/420-mtd-redboot_space.patch | 41
-rw-r--r-- target/linux/generic/pending-5.10/430-mtd-add-myloader-partition-parser.patch | 229
-rw-r--r-- target/linux/generic/pending-5.10/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch | 68
-rw-r--r-- target/linux/generic/pending-5.10/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch | 37
-rw-r--r-- target/linux/generic/pending-5.10/435-mtd-add-routerbootpart-parser-config.patch | 38
-rw-r--r-- target/linux/generic/pending-5.10/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch | 25
-rw-r--r-- target/linux/generic/pending-5.10/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch | 17
-rw-r--r-- target/linux/generic/pending-5.10/465-m25p80-mx-disable-software-protection.patch | 18
-rw-r--r-- target/linux/generic/pending-5.10/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch | 71
-rw-r--r-- target/linux/generic/pending-5.10/476-mtd-spi-nor-add-eon-en25q128.patch | 18
-rw-r--r-- target/linux/generic/pending-5.10/479-mtd-spi-nor-add-xtx-xt25f128b.patch | 79
-rw-r--r-- target/linux/generic/pending-5.10/480-mtd-set-rootfs-to-be-root-dev.patch | 38
-rw-r--r-- target/linux/generic/pending-5.10/481-mtd-spi-nor-rework-broken-flash-reset-support.patch | 180
-rw-r--r-- target/linux/generic/pending-5.10/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch | 22
-rw-r--r-- target/linux/generic/pending-5.10/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch | 97
-rw-r--r-- target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch | 66
-rw-r--r-- target/linux/generic/pending-5.10/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch | 51
-rw-r--r-- target/linux/generic/pending-5.10/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch | 34
-rw-r--r-- target/linux/generic/pending-5.10/494-mtd-ubi-add-EOF-marker-support.patch | 60
-rw-r--r-- target/linux/generic/pending-5.10/495-mtd-core-add-get_mtd_device_by_node.patch | 75
-rw-r--r-- target/linux/generic/pending-5.10/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch | 52
-rw-r--r-- target/linux/generic/pending-5.10/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch | 216
-rw-r--r-- target/linux/generic/pending-5.10/498-mtd-mtdconcat-select-readwrite-function.patch | 24
-rw-r--r-- target/linux/generic/pending-5.10/500-fs_cdrom_dependencies.patch | 40
-rw-r--r-- target/linux/generic/pending-5.10/530-jffs2_make_lzma_available.patch | 5180
-rw-r--r-- target/linux/generic/pending-5.10/532-jffs2_eofdetect.patch | 65
-rw-r--r-- target/linux/generic/pending-5.10/600-netfilter_conntrack_flush.patch | 88
-rw-r--r-- target/linux/generic/pending-5.10/610-netfilter_match_bypass_default_checks.patch | 110
-rw-r--r-- target/linux/generic/pending-5.10/611-netfilter_match_bypass_default_table.patch | 106
-rw-r--r-- target/linux/generic/pending-5.10/612-netfilter_match_reduce_memory_access.patch | 22
-rw-r--r-- target/linux/generic/pending-5.10/613-netfilter_optional_tcp_window_check.patch | 73
-rw-r--r-- target/linux/generic/pending-5.10/620-net_sched-codel-do-not-defer-queue-length-update.patch | 86
-rw-r--r-- target/linux/generic/pending-5.10/630-packet_socket_type.patch | 138
-rw-r--r-- target/linux/generic/pending-5.10/640-00-netfilter-flowtable-add-hash-offset-field-to-tuple.patch | 52
-rw-r--r-- target/linux/generic/pending-5.10/640-01-netfilter-flowtable-add-xmit-path-types.patch | 188
-rw-r--r-- target/linux/generic/pending-5.10/640-02-net-resolve-forwarding-path-from-virtual-netdevice-a.patch | 170
-rw-r--r-- target/linux/generic/pending-5.10/640-03-net-8021q-resolve-forwarding-path-for-vlan-devices.patch | 80
-rw-r--r-- target/linux/generic/pending-5.10/640-04-bridge-resolve-forwarding-path-for-bridge-devices.patch | 62
-rw-r--r-- target/linux/generic/pending-5.10/640-05-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch | 191
-rw-r--r-- target/linux/generic/pending-5.10/640-06-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch | 336
-rw-r--r-- target/linux/generic/pending-5.10/640-07-netfilter-flowtable-add-vlan-support.patch | 364
-rw-r--r-- target/linux/generic/pending-5.10/640-08-selftests-netfilter-flowtable-bridge-and-VLAN-suppor.patch | 107
-rw-r--r-- target/linux/generic/pending-5.10/640-09-net-bridge-resolve-VLAN-tag-actions-in-forwarding-pa.patch | 207
-rw-r--r-- target/linux/generic/pending-5.10/640-10-net-dsa-resolve-forwarding-path-for-dsa-slave-ports.patch | 62
-rw-r--r-- target/linux/generic/pending-5.10/640-11-netfilter-flowtable-add-offload-support-for-xmit-pat.patch | 307
-rw-r--r-- target/linux/generic/pending-5.10/640-12-netfilter-nft_flow_offload-add-dsa-support.patch | 30
-rw-r--r-- target/linux/generic/pending-5.10/640-13-dsa-slave-add-support-for-TC_SETUP_FT.patch | 53
-rw-r--r-- target/linux/generic/pending-5.10/640-14-netfilter-nft_flow_offload-add-bridge-vlan-filtering.patch | 31
-rw-r--r-- target/linux/generic/pending-5.10/640-15-netfilter-nft_flow_offload-use-direct-xmit-if-hardwa.patch | 51
-rw-r--r-- target/linux/generic/pending-5.10/640-16-netfilter-nft_flow_offload-fix-bridge-vlan-tag-handl.patch | 22
-rw-r--r-- target/linux/generic/pending-5.10/640-17-netfilter-flowtable-rework-ingress-vlan-matching.patch | 143
-rw-r--r-- target/linux/generic/pending-5.10/640-18-netfilter-flowtable-handle-bridge-vlan-filter-offloa.patch | 106
-rw-r--r-- target/linux/generic/pending-5.10/655-increase_skb_pad.patch | 20
-rw-r--r-- target/linux/generic/pending-5.10/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch | 511
-rw-r--r-- target/linux/generic/pending-5.10/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch | 263
-rw-r--r-- target/linux/generic/pending-5.10/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch | 50
-rw-r--r-- target/linux/generic/pending-5.10/680-NET-skip-GRO-for-foreign-MAC-addresses.patch | 149
-rw-r--r-- target/linux/generic/pending-5.10/681-NET-add-of_get_mac_address_mtd.patch | 135
-rw-r--r-- target/linux/generic/pending-5.10/690-net-add-support-for-threaded-NAPI-polling.patch | 284
-rw-r--r-- target/linux/generic/pending-5.10/691-net-add-sysfs-attribute-for-enabling-threaded-NAPI.patch | 74
-rw-r--r-- target/linux/generic/pending-5.10/703-phy-add-detach-callback-to-struct-phy_driver.patch | 38
-rw-r--r-- target/linux/generic/pending-5.10/735-net-phy-at803x-fix-at8033-sgmii-mode.patch | 51
-rw-r--r-- target/linux/generic/pending-5.10/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch | 27
-rw-r--r-- target/linux/generic/pending-5.10/761-net-dsa-mt7530-Support-EEE-features.patch | 121
-rw-r--r-- target/linux/generic/pending-5.10/770-00-net-ethernet-mtk_eth_soc-use-napi_consume_skb.patch | 72
-rw-r--r-- target/linux/generic/pending-5.10/770-01-net-ethernet-mtk_eth_soc-significantly-reduce-mdio-b.patch | 26
-rw-r--r-- target/linux/generic/pending-5.10/770-02-net-ethernet-mtk_eth_soc-fix-rx-vlan-offload.patch | 31
-rw-r--r-- target/linux/generic/pending-5.10/770-03-net-ethernet-mtk_eth_soc-fix-unnecessary-tx-queue-st.patch | 50
-rw-r--r-- target/linux/generic/pending-5.10/770-04-net-ethernet-mtk_eth_soc-use-larger-burst-size-for-q.patch | 32
-rw-r--r-- target/linux/generic/pending-5.10/770-05-net-ethernet-mtk_eth_soc-increase-DMA-ring-sizes.patch | 21
-rw-r--r-- target/linux/generic/pending-5.10/770-06-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch | 281
-rw-r--r-- target/linux/generic/pending-5.10/770-08-net-ethernet-mtk_eth_soc-cache-hardware-pointer-of-l.patch | 67
-rw-r--r-- target/linux/generic/pending-5.10/770-09-net-ethernet-mtk_eth_soc-only-read-the-full-rx-descr.patch | 44
-rw-r--r-- target/linux/generic/pending-5.10/770-10-net-ethernet-mtk_eth_soc-unmap-rx-data-before-callin.patch | 44
-rw-r--r-- target/linux/generic/pending-5.10/770-11-net-ethernet-mtk_eth_soc-avoid-rearming-interrupt-if.patch | 35
-rw-r--r-- target/linux/generic/pending-5.10/770-13-net-ethernet-mtk_eth_soc-fix-parsing-packets-in-GDM.patch | 67
-rw-r--r-- target/linux/generic/pending-5.10/770-14-net-ethernet-mtk_eth_soc-set-PPE-flow-hash-as-skb-ha.patch | 41
-rw-r--r-- target/linux/generic/pending-5.10/770-15-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch | 1307
-rw-r--r-- target/linux/generic/pending-5.10/770-16-net-ethernet-mtk_eth_soc-add-flow-offloading-support.patch | 561
-rw-r--r-- target/linux/generic/pending-5.10/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch | 70
-rw-r--r-- target/linux/generic/pending-5.10/810-pci_disable_common_quirks.patch | 62
-rw-r--r-- target/linux/generic/pending-5.10/811-pci_disable_usb_common_quirks.patch | 115
-rw-r--r-- target/linux/generic/pending-5.10/820-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch | 26
-rw-r--r-- target/linux/generic/pending-5.10/834-ledtrig-libata.patch | 149
-rw-r--r-- target/linux/generic/pending-5.10/920-mangle_bootargs.patch | 71
116 files changed, 17259 insertions, 0 deletions
diff --git a/target/linux/generic/pending-5.10/100-compiler.h-only-include-asm-rwonce.h-for-kernel-code.patch b/target/linux/generic/pending-5.10/100-compiler.h-only-include-asm-rwonce.h-for-kernel-code.patch
new file mode 100644
index 0000000000..39b98eac1b
--- /dev/null
+++ b/target/linux/generic/pending-5.10/100-compiler.h-only-include-asm-rwonce.h-for-kernel-code.patch
@@ -0,0 +1,29 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 22 Oct 2020 22:00:03 +0200
+Subject: [PATCH] compiler.h: only include asm/rwonce.h for kernel code
+
+This header file is not in uapi, which causes any user space code that includes
+linux/compiler.h to fail with the error 'asm/rwonce.h: No such file or directory'.
+
+Fixes: e506ea451254 ("compiler.h: Split {READ,WRITE}_ONCE definitions out into rwonce.h")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -211,6 +211,8 @@ void ftrace_likely_update(struct ftrace_
+ __v; \
+ })
+
++#include <asm/rwonce.h>
++
+ #endif /* __KERNEL__ */
+
+ /*
+@@ -243,6 +245,4 @@ static inline void *offset_to_ptr(const
+ */
+ #define prevent_tail_call_optimization() mb()
+
+-#include <asm/rwonce.h>
+-
+ #endif /* __LINUX_COMPILER_H */
diff --git a/target/linux/generic/pending-5.10/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch b/target/linux/generic/pending-5.10/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch
new file mode 100644
index 0000000000..cc9f99e8b0
--- /dev/null
+++ b/target/linux/generic/pending-5.10/102-MIPS-only-process-negative-stack-offsets-on-stack-tr.patch
@@ -0,0 +1,57 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 18 Apr 2018 10:50:05 +0200
+Subject: [PATCH] MIPS: only process negative stack offsets on stack traces
+
+Fixes endless back traces in cases where the compiler emits a stack
+pointer increase in a branch delay slot (probably for some form of
+function return).
+
+[ 3.475442] BUG: MAX_STACK_TRACE_ENTRIES too low!
+[ 3.480070] turning off the locking correctness validator.
+[ 3.485521] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.14.34 #0
+[ 3.491475] Stack : 00000000 00000000 00000000 00000000 80e0fce2 00000034 00000000 00000000
+[ 3.499764] 87c3838c 80696377 8061047c 00000000 00000001 00000001 87c2d850 6534689f
+[ 3.508059] 00000000 00000000 80e10000 00000000 00000000 000000cf 0000000f 00000000
+[ 3.516353] 00000000 806a0000 00076891 00000000 00000000 00000000 ffffffff 00000000
+[ 3.524648] 806c0000 00000004 80e10000 806a0000 00000003 80690000 00000000 80700000
+[ 3.532942] ...
+[ 3.535362] Call Trace:
+[ 3.537818] [<80010a48>] show_stack+0x58/0x100
+[ 3.542207] [<804c2f78>] dump_stack+0xe8/0x170
+[ 3.546613] [<80079f90>] save_trace+0xf0/0x110
+[ 3.551010] [<8007b1ec>] mark_lock+0x33c/0x78c
+[ 3.555413] [<8007bf48>] __lock_acquire+0x2ac/0x1a08
+[ 3.560337] [<8007de60>] lock_acquire+0x64/0x8c
+[ 3.564846] [<804e1570>] _raw_spin_lock_irqsave+0x54/0x78
+[ 3.570186] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.574770] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.579257] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.583839] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.588329] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.592911] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.597401] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.601983] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.606473] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.611055] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.615545] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.620125] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.624619] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.629197] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.633691] [<801b618c>] kernfs_notify+0x94/0xac
+[ 3.638269] [<801b7b10>] sysfs_notify+0x74/0xa0
+[ 3.642763] [<801b618c>] kernfs_notify+0x94/0xac
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -380,6 +380,8 @@ static inline int is_sp_move_ins(union m
+
+ if (ip->i_format.opcode == addiu_op ||
+ ip->i_format.opcode == daddiu_op) {
++ if (ip->i_format.simmediate > 0)
++ return 0;
+ *frame_size = -ip->i_format.simmediate;
+ return 1;
+ }
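
As a quick standalone check of the logic just added, here is a minimal user-space sketch; it assumes a simplified decoder that only looks at the opcode and immediate (the real is_sp_move_ins() also checks the register fields and other encodings):

    #include <stdio.h>
    #include <stdint.h>

    /* Decode addiu rt, rs, imm and reject positive immediates: a stack
     * pointer increase marks a function epilogue, not a frame allocation. */
    static int is_sp_move_ins(uint32_t insn, int *frame_size)
    {
        uint32_t opcode = insn >> 26;                   /* addiu opcode = 9 */
        int16_t simmediate = (int16_t)(insn & 0xffff);  /* sign-extended */

        if (opcode != 9)
            return 0;
        if (simmediate > 0)                             /* the added check */
            return 0;
        *frame_size = -simmediate;
        return 1;
    }

    int main(void)
    {
        int frame;
        /* addiu $sp,$sp,-32: allocates a 32-byte frame */
        if (is_sp_move_ins(0x27bdffe0, &frame))
            printf("frame size %d\n", frame);                   /* 32 */
        /* addiu $sp,$sp,16: epilogue in a delay slot, now skipped */
        printf("match: %d\n", is_sp_move_ins(0x27bd0010, &frame)); /* 0 */
        return 0;
    }
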
diff --git a/target/linux/generic/pending-5.10/110-ehci_hcd_ignore_oc.patch b/target/linux/generic/pending-5.10/110-ehci_hcd_ignore_oc.patch
new file mode 100644
index 0000000000..138d4fa1c5
--- /dev/null
+++ b/target/linux/generic/pending-5.10/110-ehci_hcd_ignore_oc.patch
@@ -0,0 +1,79 @@
+From: Florian Fainelli <florian@openwrt.org>
+Subject: USB: EHCI: add ignore_oc flag to disable overcurrent checking
+
+This patch adds an ignore_oc flag which can be set by EHCI controllers
+that do not support overcurrent checking or want it disabled. The EHCI
+platform data in include/linux/usb/ehci_pdriver.h is also augmented to
+take advantage of this new flag.
+
+Signed-off-by: Florian Fainelli <florian@openwrt.org>
+---
+ drivers/usb/host/ehci-hcd.c | 2 +-
+ drivers/usb/host/ehci-hub.c | 4 ++--
+ drivers/usb/host/ehci-platform.c | 1 +
+ drivers/usb/host/ehci.h | 1 +
+ include/linux/usb/ehci_pdriver.h | 1 +
+ 5 files changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -651,7 +651,7 @@ static int ehci_run (struct usb_hcd *hcd
+ "USB %x.%x started, EHCI %x.%02x%s\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
+ temp >> 8, temp & 0xff,
+- ignore_oc ? ", overcurrent ignored" : "");
++ (ignore_oc || ehci->ignore_oc) ? ", overcurrent ignored" : "");
+
+ ehci_writel(ehci, INTR_MASK,
+ &ehci->regs->intr_enable); /* Turn On Interrupts */
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -643,7 +643,7 @@ ehci_hub_status_data (struct usb_hcd *hc
+ * always set, seem to clear PORT_OCC and PORT_CSC when writing to
+ * PORT_POWER; that's surprising, but maybe within-spec.
+ */
+- if (!ignore_oc)
++ if (!ignore_oc && !ehci->ignore_oc)
+ mask = PORT_CSC | PORT_PEC | PORT_OCC;
+ else
+ mask = PORT_CSC | PORT_PEC;
+@@ -1013,7 +1013,7 @@ int ehci_hub_control(
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+
+- if ((temp & PORT_OCC) && !ignore_oc){
++ if ((temp & PORT_OCC) && (!ignore_oc && !ehci->ignore_oc)){
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+ /*
+--- a/drivers/usb/host/ehci-platform.c
++++ b/drivers/usb/host/ehci-platform.c
+@@ -327,6 +327,8 @@ static int ehci_platform_probe(struct pl
+ hcd->has_tt = 1;
+ if (pdata->reset_on_resume)
+ priv->reset_on_resume = true;
++ if (pdata->ignore_oc)
++ ehci->ignore_oc = 1;
+
+ #ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+ if (ehci->big_endian_mmio) {
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -218,6 +218,7 @@ struct ehci_hcd { /* one per controlle
+ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
+ unsigned need_oc_pp_cycle:1; /* MPC834X port power */
+ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
++ unsigned ignore_oc:1;
+
+ /* required for usb32 quirk */
+ #define OHCI_CTRL_HCFS (3 << 6)
+--- a/include/linux/usb/ehci_pdriver.h
++++ b/include/linux/usb/ehci_pdriver.h
+@@ -50,6 +50,7 @@ struct usb_ehci_pdata {
+ unsigned no_io_watchdog:1;
+ unsigned reset_on_resume:1;
+ unsigned dma_mask_64:1;
++ unsigned ignore_oc:1;
+
+ /* Turn on all power and clocks */
+ int (*power_on)(struct platform_device *pdev);
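
For context, a hypothetical board-support fragment showing how the augmented platform data would be used; with this patch applied, ehci_platform_probe() copies the flag into ehci->ignore_oc:

    #include <linux/usb/ehci_pdriver.h>

    /* hypothetical board file fragment, not part of the patch */
    static struct usb_ehci_pdata board_ehci_pdata = {
        .ignore_oc = 1,   /* no overcurrent wiring on this board */
    };
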
diff --git a/target/linux/generic/pending-5.10/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch b/target/linux/generic/pending-5.10/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch
new file mode 100644
index 0000000000..4e83b98724
--- /dev/null
+++ b/target/linux/generic/pending-5.10/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch
@@ -0,0 +1,82 @@
+From: Tobias Wolf <dev-NTEO@vplace.de>
+Subject: mm: Fix alloc_node_mem_map with ARCH_PFN_OFFSET calculation
+
+An rt288x (ralink) based router (Belkin F5D8235 v1) does not boot with any
+kernel beyond version 4.3 resulting in:
+
+BUG: Bad page state in process swapper pfn:086ac
+
+bisect resulted in:
+
+a1c34a3bf00af2cede839879502e12dc68491ad5 is the first bad commit
+commit a1c34a3bf00af2cede839879502e12dc68491ad5
+Author: Laura Abbott <laura@labbott.name>
+Date: Thu Nov 5 18:48:46 2015 -0800
+
+ mm: Don't offset memmap for flatmem
+
+ Srinivas Kandagatla reported bad page messages when trying to remove the
+ bottom 2MB on an ARM based IFC6410 board
+
+ BUG: Bad page state in process swapper pfn:fffa8
+ page:ef7fb500 count:0 mapcount:0 mapping: (null) index:0x0
+ flags: 0x96640253(locked|error|dirty|active|arch_1|reclaim|mlocked)
+ page dumped because: PAGE_FLAGS_CHECK_AT_FREE flag(s) set
+ bad because of flags:
+ flags: 0x200041(locked|active|mlocked)
+ Modules linked in:
+ CPU: 0 PID: 0 Comm: swapper Not tainted 3.19.0-rc3-00007-g412f9ba-dirty
+#816
+ Hardware name: Qualcomm (Flattened Device Tree)
+ unwind_backtrace
+ show_stack
+ dump_stack
+ bad_page
+ free_pages_prepare
+ free_hot_cold_page
+ __free_pages
+ free_highmem_page
+ mem_init
+ start_kernel
+ Disabling lock debugging due to kernel taint
+ [...]
+:040000 040000 2de013c372345fd471cd58f0553c9b38b0ef1cc4
+0a8156f848733dfa21e16c196dfb6c0a76290709 M mm
+
+This fix for ARM no longer accounts for ARCH_PFN_OFFSET in mem_map, which is
+later used by page_to_pfn.
+
+The following output was generated with two hacked in printk statements:
+
+printk("before %p vs. %p or %p\n", mem_map, mem_map - offset, mem_map -
+(pgdat->node_start_pfn - ARCH_PFN_OFFSET));
+ if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
+ mem_map -= offset + (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
+printk("after %p\n", mem_map);
+
+Output:
+
+[ 0.000000] before 8861b280 vs. 8861b280 or 8851b280
+[ 0.000000] after 8851b280
+
+As seen in the first line mem_map with subtraction of offset does not equal the
+mem_map after subtraction of ARCH_PFN_OFFSET.
+
+After additionally accounting for the ARCH_PFN_OFFSET in mem_map (the
+previously calculated offset is zero for the named platform), the board is
+able to boot 4.4 and 4.9-rc7 again.
+
+Signed-off-by: Tobias Wolf <dev-NTEO@vplace.de>
+---
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6951,7 +6951,7 @@ static void __ref alloc_node_mem_map(str
+ if (pgdat == NODE_DATA(0)) {
+ mem_map = NODE_DATA(0)->node_mem_map;
+ if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
+- mem_map -= offset;
++ mem_map -= offset + (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
+ }
+ #endif
+ }
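
The corrected arithmetic can be checked standalone with the values from the log above; this sketch assumes an illustrative 32-byte struct page and a gap of 0x8000 pages between node_start_pfn and ARCH_PFN_OFFSET:

    #include <stdio.h>

    int main(void)
    {
        unsigned long mem_map = 0x8861b280UL;  /* byte address, for illustration */
        unsigned long offset = 0;       /* node_start_pfn - pfn(mem_map), zero here */
        unsigned long pfn_gap = 0x8000; /* node_start_pfn - ARCH_PFN_OFFSET (assumed) */
        unsigned long page_sz = 32;     /* stand-in for sizeof(struct page) */

        /* old code: mem_map -= offset;  leaves 0x8861b280 unchanged */
        mem_map -= (offset + pfn_gap) * page_sz;   /* the new calculation */
        printf("0x%lx\n", mem_map);                /* 0x8851b280, as in the log */
        return 0;
    }
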
diff --git a/target/linux/generic/pending-5.10/130-add-linux-spidev-compatible-si3210.patch b/target/linux/generic/pending-5.10/130-add-linux-spidev-compatible-si3210.patch
new file mode 100644
index 0000000000..355e900a3b
--- /dev/null
+++ b/target/linux/generic/pending-5.10/130-add-linux-spidev-compatible-si3210.patch
@@ -0,0 +1,18 @@
+From: Giuseppe Lippolis <giu.lippolis@gmail.com>
+Subject: Add the linux,spidev compatible in spidev; several devices in ramips have this binding in the dts
+
+Signed-off-by: Giuseppe Lippolis <giu.lippolis@gmail.com>
+---
+ drivers/spi/spidev.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -682,6 +682,7 @@ static const struct of_device_id spidev_
+ { .compatible = "lwn,bk4" },
+ { .compatible = "dh,dhcom-board" },
+ { .compatible = "menlo,m53cpld" },
++ { .compatible = "siliconlabs,si3210" },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/target/linux/generic/pending-5.10/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch b/target/linux/generic/pending-5.10/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch
new file mode 100644
index 0000000000..e48da41fae
--- /dev/null
+++ b/target/linux/generic/pending-5.10/140-jffs2-use-.rename2-and-add-RENAME_WHITEOUT-support.patch
@@ -0,0 +1,78 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: jffs2: use .rename2 and add RENAME_WHITEOUT support
+
+It is required for renames on overlayfs
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -609,7 +609,8 @@ static int jffs2_rmdir (struct inode *di
+ return ret;
+ }
+
+-static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode, dev_t rdev)
++static int __jffs2_mknod (struct inode *dir_i, struct dentry *dentry,
++ umode_t mode, dev_t rdev, bool whiteout)
+ {
+ struct jffs2_inode_info *f, *dir_f;
+ struct jffs2_sb_info *c;
+@@ -748,7 +749,11 @@ static int jffs2_mknod (struct inode *di
+ mutex_unlock(&dir_f->sem);
+ jffs2_complete_reservation(c);
+
+- d_instantiate_new(dentry, inode);
++ if (!whiteout)
++ d_instantiate_new(dentry, inode);
++ else
++ unlock_new_inode(inode);
++
+ return 0;
+
+ fail:
+@@ -756,6 +761,17 @@ static int jffs2_mknod (struct inode *di
+ return ret;
+ }
+
++static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode, dev_t rdev)
++{
++ return __jffs2_mknod(dir_i, dentry, mode, rdev, false);
++}
++
++static int jffs2_whiteout (struct inode *old_dir, struct dentry *old_dentry)
++{
++ return __jffs2_mknod(old_dir, old_dentry, S_IFCHR | WHITEOUT_MODE,
++ WHITEOUT_DEV, true);
++}
++
+ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
+ struct inode *new_dir_i, struct dentry *new_dentry,
+ unsigned int flags)
+@@ -766,7 +782,7 @@ static int jffs2_rename (struct inode *o
+ uint8_t type;
+ uint32_t now;
+
+- if (flags & ~RENAME_NOREPLACE)
++ if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT))
+ return -EINVAL;
+
+ /* The VFS will check for us and prevent trying to rename a
+@@ -832,9 +848,14 @@ static int jffs2_rename (struct inode *o
+ if (d_is_dir(old_dentry) && !victim_f)
+ inc_nlink(new_dir_i);
+
+- /* Unlink the original */
+- ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
+- old_dentry->d_name.name, old_dentry->d_name.len, NULL, now);
++ if (flags & RENAME_WHITEOUT)
++ /* Replace with whiteout */
++ ret = jffs2_whiteout(old_dir_i, old_dentry);
++ else
++ /* Unlink the original */
++ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
++ old_dentry->d_name.name,
++ old_dentry->d_name.len, NULL, now);
+
+ /* We don't touch inode->i_nlink */
+
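
A user-space sketch of what the new flag enables (glibc 2.28+; the file names are made up): overlayfs issues RENAME_WHITEOUT internally, but it can also be exercised directly on a jffs2 mount, given CAP_MKNOD for the whiteout device node:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
        /* rename "old" to "new", leaving a whiteout char device at "old" */
        if (renameat2(AT_FDCWD, "old", AT_FDCWD, "new", RENAME_WHITEOUT) < 0)
            perror("renameat2");
        return 0;
    }
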
diff --git a/target/linux/generic/pending-5.10/141-jffs2-add-RENAME_EXCHANGE-support.patch b/target/linux/generic/pending-5.10/141-jffs2-add-RENAME_EXCHANGE-support.patch
new file mode 100644
index 0000000000..dbc72339c6
--- /dev/null
+++ b/target/linux/generic/pending-5.10/141-jffs2-add-RENAME_EXCHANGE-support.patch
@@ -0,0 +1,73 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: jffs2: add RENAME_EXCHANGE support
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -779,18 +779,31 @@ static int jffs2_rename (struct inode *o
+ int ret;
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
+ struct jffs2_inode_info *victim_f = NULL;
++ struct inode *fst_inode = d_inode(old_dentry);
++ struct inode *snd_inode = d_inode(new_dentry);
+ uint8_t type;
+ uint32_t now;
+
+- if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT))
++ if (flags & ~(RENAME_NOREPLACE|RENAME_WHITEOUT|RENAME_EXCHANGE))
+ return -EINVAL;
+
++ if ((flags & RENAME_EXCHANGE) && (old_dir_i != new_dir_i)) {
++ if (S_ISDIR(fst_inode->i_mode) && !S_ISDIR(snd_inode->i_mode)) {
++ inc_nlink(new_dir_i);
++ drop_nlink(old_dir_i);
++ }
++ else if (!S_ISDIR(fst_inode->i_mode) && S_ISDIR(snd_inode->i_mode)) {
++ drop_nlink(new_dir_i);
++ inc_nlink(old_dir_i);
++ }
++ }
++
+ /* The VFS will check for us and prevent trying to rename a
+ * file over a directory and vice versa, but if it's a directory,
+ * the VFS can't check whether the victim is empty. The filesystem
+ * needs to do that for itself.
+ */
+- if (d_really_is_positive(new_dentry)) {
++ if (d_really_is_positive(new_dentry) && !(flags & RENAME_EXCHANGE)) {
+ victim_f = JFFS2_INODE_INFO(d_inode(new_dentry));
+ if (d_is_dir(new_dentry)) {
+ struct jffs2_full_dirent *fd;
+@@ -825,7 +838,7 @@ static int jffs2_rename (struct inode *o
+ if (ret)
+ return ret;
+
+- if (victim_f) {
++ if (victim_f && !(flags & RENAME_EXCHANGE)) {
+ /* There was a victim. Kill it off nicely */
+ if (d_is_dir(new_dentry))
+ clear_nlink(d_inode(new_dentry));
+@@ -851,6 +864,12 @@ static int jffs2_rename (struct inode *o
+ if (flags & RENAME_WHITEOUT)
+ /* Replace with whiteout */
+ ret = jffs2_whiteout(old_dir_i, old_dentry);
++ else if (flags & RENAME_EXCHANGE)
++ /* Replace the original */
++ ret = jffs2_do_link(c, JFFS2_INODE_INFO(old_dir_i),
++ d_inode(new_dentry)->i_ino, type,
++ old_dentry->d_name.name, old_dentry->d_name.len,
++ now);
+ else
+ /* Unlink the original */
+ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
+@@ -882,7 +901,7 @@ static int jffs2_rename (struct inode *o
+ return ret;
+ }
+
+- if (d_is_dir(old_dentry))
++ if (d_is_dir(old_dentry) && !(flags & RENAME_EXCHANGE))
+ drop_nlink(old_dir_i);
+
+ new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
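
The companion operation, sketched the same way (file names made up): RENAME_EXCHANGE atomically swaps two entries, which the hunk above implements by re-linking the old name to the victim inode instead of unlinking it:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
        /* atomically swap directory entries "a" and "b" */
        if (renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE) < 0)
            perror("renameat2");
        return 0;
    }
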
diff --git a/target/linux/generic/pending-5.10/142-jffs2-add-splice-ops.patch b/target/linux/generic/pending-5.10/142-jffs2-add-splice-ops.patch
new file mode 100644
index 0000000000..de847a1f5c
--- /dev/null
+++ b/target/linux/generic/pending-5.10/142-jffs2-add-splice-ops.patch
@@ -0,0 +1,20 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: jffs2: add splice ops
+
+Add splice_read using generic_file_splice_read.
+Add splice_write using iter_file_splice_write
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -53,6 +53,8 @@ const struct file_operations jffs2_file_
+ .open = generic_file_open,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
++ .splice_read = generic_file_splice_read,
++ .splice_write = iter_file_splice_write,
+ .unlocked_ioctl=jffs2_ioctl,
+ .mmap = generic_file_readonly_mmap,
+ .fsync = jffs2_fsync,
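
With both operations wired up, splice(2) on a jffs2 file moves data without a user-space bounce buffer; a short sketch (the path is hypothetical):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int pfd[2];
        int fd = open("/overlay/upper/etc/config/network", O_RDONLY);

        if (fd < 0 || pipe(pfd) < 0)
            return 1;
        /* move up to 4 KiB from the file into the pipe entirely in-kernel */
        if (splice(fd, NULL, pfd[1], NULL, 4096, 0) < 0)
            return 1;
        return 0;
    }
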
diff --git a/target/linux/generic/pending-5.10/150-bridge_allow_receiption_on_disabled_port.patch b/target/linux/generic/pending-5.10/150-bridge_allow_receiption_on_disabled_port.patch
new file mode 100644
index 0000000000..6de22b1c7d
--- /dev/null
+++ b/target/linux/generic/pending-5.10/150-bridge_allow_receiption_on_disabled_port.patch
@@ -0,0 +1,45 @@
+From: Stephen Hemminger <stephen@networkplumber.org>
+Subject: bridge: allow reception on disabled port
+
+When an ethernet device is enslaved to a bridge, and the bridge STP
+detects loss of carrier (or operational state down), then normally
+packet reception is blocked.
+
+This breaks control applications like WPA which may be expecting to
+receive packets to negotiate bringing the link up. The bridge needs to
+block forwarding packets from these disabled ports, but there is no
+hard requirement to not allow local packet delivery.
+
+Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -195,6 +195,9 @@ static void __br_handle_local_finish(str
+ /* note: already called with rcu_read_lock */
+ static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
++ struct net_bridge_port *p = br_port_get_rcu(skb->dev);
++
++ if (p->state != BR_STATE_DISABLED)
+ __br_handle_local_finish(skb);
+
+ /* return 1 to signal the okfn() was called so it's ok to use the skb */
+@@ -348,6 +351,17 @@ static rx_handler_result_t br_handle_fra
+
+ forward:
+ switch (p->state) {
++ case BR_STATE_DISABLED:
++ if (ether_addr_equal(p->br->dev->dev_addr, dest))
++ skb->pkt_type = PACKET_HOST;
++
++ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
++ dev_net(skb->dev), NULL, skb, skb->dev, NULL,
++ br_handle_local_finish) == 1) {
++ return RX_HANDLER_PASS;
++ }
++ break;
++
+ case BR_STATE_FORWARDING:
+ case BR_STATE_LEARNING:
+ if (ether_addr_equal(p->br->dev->dev_addr, dest))
diff --git a/target/linux/generic/pending-5.10/190-rtc-rs5c372-support_alarms_up_to_1_week.patch b/target/linux/generic/pending-5.10/190-rtc-rs5c372-support_alarms_up_to_1_week.patch
new file mode 100644
index 0000000000..13b79b5c09
--- /dev/null
+++ b/target/linux/generic/pending-5.10/190-rtc-rs5c372-support_alarms_up_to_1_week.patch
@@ -0,0 +1,94 @@
+From: Daniel González Cabanelas <dgcbueu@gmail.com>
+Subject: [PATCH 1/2] rtc: rs5c372: support alarms up to 1 week
+
+The Ricoh R2221x, R2223x, RS5C372, RV5C387A chips can handle 1 week
+alarms.
+
+Read the "wday" alarm register and convert it to a date to support up to 1
+week in our driver.
+
+Signed-off-by: Daniel González Cabanelas <dgcbueu@gmail.com>
+---
+ drivers/rtc/rtc-rs5c372.c | 48 ++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 42 insertions(+), 6 deletions(-)
+
+--- a/drivers/rtc/rtc-rs5c372.c
++++ b/drivers/rtc/rtc-rs5c372.c
+@@ -393,7 +393,9 @@ static int rs5c_read_alarm(struct device
+ {
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rs5c372 *rs5c = i2c_get_clientdata(client);
+- int status;
++ int status, wday_offs;
++ struct rtc_time rtc;
++ unsigned long alarm_secs;
+
+ status = rs5c_get_regs(rs5c);
+ if (status < 0)
+@@ -403,6 +405,30 @@ static int rs5c_read_alarm(struct device
+ t->time.tm_sec = 0;
+ t->time.tm_min = bcd2bin(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f);
+ t->time.tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C_REG_ALARM_A_HOURS]);
++ t->time.tm_wday = ffs(rs5c->regs[RS5C_REG_ALARM_A_WDAY] & 0x7f) - 1;
++
++ /* determine the day, month and year based on alarm wday, taking as a
++ * reference the current time from the rtc
++ */
++ status = rs5c372_rtc_read_time(dev, &rtc);
++ if (status < 0)
++ return status;
++
++ wday_offs = t->time.tm_wday - rtc.tm_wday;
++ alarm_secs = mktime64(rtc.tm_year + 1900,
++ rtc.tm_mon + 1,
++ rtc.tm_mday + wday_offs,
++ t->time.tm_hour,
++ t->time.tm_min,
++ t->time.tm_sec);
++
++ if (wday_offs < 0 || (wday_offs == 0 &&
++ (t->time.tm_hour < rtc.tm_hour ||
++ (t->time.tm_hour == rtc.tm_hour &&
++ t->time.tm_min <= rtc.tm_min))))
++ alarm_secs += 7 * 86400;
++
++ rtc_time64_to_tm(alarm_secs, &t->time);
+
+ /* ... and status */
+ t->enabled = !!(rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE);
+@@ -417,12 +443,20 @@ static int rs5c_set_alarm(struct device
+ struct rs5c372 *rs5c = i2c_get_clientdata(client);
+ int status, addr, i;
+ unsigned char buf[3];
++ struct rtc_time rtc_tm;
++ unsigned long rtc_secs, alarm_secs;
+
+- /* only handle up to 24 hours in the future, like RTC_ALM_SET */
+- if (t->time.tm_mday != -1
+- || t->time.tm_mon != -1
+- || t->time.tm_year != -1)
++	/* the chip can only handle alarms up to one week in the future */
++ status = rs5c372_rtc_read_time(dev, &rtc_tm);
++ if (status)
++ return status;
++ rtc_secs = rtc_tm_to_time64(&rtc_tm);
++ alarm_secs = rtc_tm_to_time64(&t->time);
++ if (alarm_secs >= rtc_secs + 7 * 86400) {
++ dev_err(dev, "%s: alarm maximum is one week in the future (%d)\n",
++ __func__, status);
+ return -EINVAL;
++ }
+
+ /* REVISIT: round up tm_sec */
+
+@@ -443,7 +477,9 @@ static int rs5c_set_alarm(struct device
+ /* set alarm */
+ buf[0] = bin2bcd(t->time.tm_min);
+ buf[1] = rs5c_hr2reg(rs5c, t->time.tm_hour);
+- buf[2] = 0x7f; /* any/all days */
++ /* each bit is the day of the week, 0x7f means all days */
++ buf[2] = (t->time.tm_wday >= 0 && t->time.tm_wday < 7) ?
++ BIT(t->time.tm_wday) : 0x7f;
+
+ for (i = 0; i < sizeof(buf); i++) {
+ addr = RS5C_ADDR(RS5C_REG_ALARM_A_MIN + i);
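
The wraparound rule added to rs5c_read_alarm() reduces to day arithmetic; a standalone sketch with assumed weekdays (now Friday, alarm weekday Wednesday), ignoring the hour/minute tie-break the driver also performs:

    #include <stdio.h>

    int main(void)
    {
        int now_wday = 5, alarm_wday = 3;   /* Friday -> Wednesday */
        long day = 86400;
        long secs = (alarm_wday - now_wday) * day;

        if (secs <= 0)          /* alarm weekday already passed this week */
            secs += 7 * day;
        printf("alarm fires in %ld days\n", secs / day);   /* 5 */
        return 0;
    }
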
diff --git a/target/linux/generic/pending-5.10/191-rtc-rs5c372-let_the_alarm_to_be_used_as_wakeup_source.patch b/target/linux/generic/pending-5.10/191-rtc-rs5c372-let_the_alarm_to_be_used_as_wakeup_source.patch
new file mode 100644
index 0000000000..7e9d0e66c0
--- /dev/null
+++ b/target/linux/generic/pending-5.10/191-rtc-rs5c372-let_the_alarm_to_be_used_as_wakeup_source.patch
@@ -0,0 +1,70 @@
+From: Daniel González Cabanelas <dgcbueu@gmail.com>
+Subject: [PATCH 2/2] rtc: rs5c372: allow the alarm to be used as wakeup source
+
+Currently there is no use for the interrupts on the rs5c372 RTC and the
+wakealarm isn't enabled. There are some devices like NASes which use this
+RTC to wake up from the power off state when the INTR pin is activated by
+the alarm clock.
+
+Enable the alarm and let it be used as a wakeup source.
+
+Tested on a Buffalo LS421DE NAS.
+
+Signed-off-by: Daniel González Cabanelas <dgcbueu@gmail.com>
+---
+ drivers/rtc/rtc-rs5c372.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/drivers/rtc/rtc-rs5c372.c
++++ b/drivers/rtc/rtc-rs5c372.c
+@@ -654,6 +654,7 @@ static int rs5c372_probe(struct i2c_clie
+ int err = 0;
+ int smbus_mode = 0;
+ struct rs5c372 *rs5c372;
++ bool rs5c372_can_wakeup_device = false;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+@@ -689,6 +690,12 @@ static int rs5c372_probe(struct i2c_clie
+ else
+ rs5c372->type = id->driver_data;
+
++#ifdef CONFIG_OF
++ if(of_property_read_bool(client->dev.of_node,
++ "wakeup-source"))
++ rs5c372_can_wakeup_device = true;
++#endif
++
+ /* we read registers 0x0f then 0x00-0x0f; skip the first one */
+ rs5c372->regs = &rs5c372->buf[1];
+ rs5c372->smbus = smbus_mode;
+@@ -722,6 +729,8 @@ static int rs5c372_probe(struct i2c_clie
+ goto exit;
+ }
+
++ rs5c372->has_irq = 1;
++
+ /* if the oscillator lost power and no other software (like
+ * the bootloader) set it up, do it here.
+ *
+@@ -748,6 +757,10 @@ static int rs5c372_probe(struct i2c_clie
+ );
+
+ /* REVISIT use client->irq to register alarm irq ... */
++ if (rs5c372_can_wakeup_device) {
++ device_init_wakeup(&client->dev, true);
++ }
++
+ rs5c372->rtc = devm_rtc_device_register(&client->dev,
+ rs5c372_driver.driver.name,
+ &rs5c372_rtc_ops, THIS_MODULE);
+@@ -761,6 +774,9 @@ static int rs5c372_probe(struct i2c_clie
+ if (err)
+ goto exit;
+
++ /* the rs5c372 alarm only supports a minute accuracy */
++ rs5c372->rtc->uie_unsupported = 1;
++
+ return 0;
+
+ exit:
diff --git a/target/linux/generic/pending-5.10/201-extra_optimization.patch b/target/linux/generic/pending-5.10/201-extra_optimization.patch
new file mode 100644
index 0000000000..53486680a2
--- /dev/null
+++ b/target/linux/generic/pending-5.10/201-extra_optimization.patch
@@ -0,0 +1,31 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: Upgrade to Linux 2.6.19
+
+- Includes large parts of the patch from #1021 by dpalffy
+- Includes RB532 NAND driver changes by n0-1
+
+[john@phrozen.org: felix will add this to his upstream queue]
+
+lede-commit: bff468813f78f81e36ebb2a3f4354de7365e640f
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ Makefile | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/Makefile
++++ b/Makefile
+@@ -733,11 +733,11 @@ KBUILD_CFLAGS += $(call cc-disable-warni
+ KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+-KBUILD_CFLAGS += -O2
++KBUILD_CFLAGS += -O2 $(EXTRA_OPTIMIZATION)
+ else ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3
+-KBUILD_CFLAGS += -O3
++KBUILD_CFLAGS += -O3 $(EXTRA_OPTIMIZATION)
+ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+-KBUILD_CFLAGS += -Os
++KBUILD_CFLAGS += -Os -fno-reorder-blocks -fno-tree-ch $(EXTRA_OPTIMIZATION)
+ endif
+
+ # Tell gcc to never replace conditional load with a non-conditional one
diff --git a/target/linux/generic/pending-5.10/203-kallsyms_uncompressed.patch b/target/linux/generic/pending-5.10/203-kallsyms_uncompressed.patch
new file mode 100644
index 0000000000..35d0333ad5
--- /dev/null
+++ b/target/linux/generic/pending-5.10/203-kallsyms_uncompressed.patch
@@ -0,0 +1,119 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a config option for keeping the kallsyms table uncompressed, saving ~9kb kernel size after lzma on ar71xx
+
+[john@phrozen.org: added to my upstream queue 30.12.2016]
+lede-commit: e0e3509b5ce2ccf93d4d67ea907613f5f7ec2eed
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ init/Kconfig | 11 +++++++++++
+ kernel/kallsyms.c | 8 ++++++++
+ scripts/kallsyms.c | 12 ++++++++++++
+ scripts/link-vmlinux.sh | 4 ++++
+ 4 files changed, 35 insertions(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1384,6 +1384,17 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
+ the unaligned access emulation.
+ see arch/parisc/kernel/unaligned.c for reference
+
++config KALLSYMS_UNCOMPRESSED
++ bool "Keep kallsyms uncompressed"
++ depends on KALLSYMS
++ help
++ Normally kallsyms contains compressed symbols (using a token table),
++ reducing the uncompressed kernel image size. Keeping the symbol table
+	  uncompressed significantly reduces the size of this part in compressed
++ kernel images.
++
++ Say N unless you need compressed kernel images to be small.
++
+ config HAVE_PCSPKR_PLATFORM
+ bool
+
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -77,6 +77,11 @@ static unsigned int kallsyms_expand_symb
+ * For every byte on the compressed symbol data, copy the table
+ * entry for that byte.
+ */
++#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
++ memcpy(result, data + 1, len - 1);
++ result += len - 1;
++ len = 0;
++#endif
+ while (len) {
+ tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
+ data++;
+@@ -109,6 +114,9 @@ tail:
+ */
+ static char kallsyms_get_symbol_type(unsigned int off)
+ {
++#ifdef CONFIG_KALLSYMS_UNCOMPRESSED
++ return kallsyms_names[off + 1];
++#endif
+ /*
+ * Get just the first code, look it up in the token table,
+ * and return the first char from this token.
+--- a/scripts/kallsyms.c
++++ b/scripts/kallsyms.c
+@@ -58,6 +58,7 @@ static struct addr_range percpu_range =
+ static struct sym_entry **table;
+ static unsigned int table_size, table_cnt;
+ static int all_symbols;
++static int uncompressed;
+ static int absolute_percpu;
+ static int base_relative;
+
+@@ -480,6 +481,9 @@ static void write_src(void)
+
+ free(markers);
+
++ if (uncompressed)
++ return;
++
+ output_label("kallsyms_token_table");
+ off = 0;
+ for (i = 0; i < 256; i++) {
+@@ -531,6 +535,9 @@ static unsigned char *find_token(unsigne
+ {
+ int i;
+
++ if (uncompressed)
++ return NULL;
++
+ for (i = 0; i < len - 1; i++) {
+ if (str[i] == token[0] && str[i+1] == token[1])
+ return &str[i];
+@@ -603,6 +610,9 @@ static void optimize_result(void)
+ {
+ int i, best;
+
++ if (uncompressed)
++ return;
++
+ /* using the '\0' symbol last allows compress_symbols to use standard
+ * fast string functions */
+ for (i = 255; i >= 0; i--) {
+@@ -767,6 +777,8 @@ int main(int argc, char **argv)
+ absolute_percpu = 1;
+ else if (strcmp(argv[i], "--base-relative") == 0)
+ base_relative = 1;
++ else if (strcmp(argv[i], "--uncompressed") == 0)
++ uncompressed = 1;
+ else
+ usage();
+ }
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -186,6 +186,10 @@ kallsyms()
+ kallsymopt="${kallsymopt} --base-relative"
+ fi
+
++ if [ -n "${CONFIG_KALLSYMS_UNCOMPRESSED}" ]; then
++ kallsymopt="${kallsymopt} --uncompressed"
++ fi
++
+ info KSYMS ${2}
+ ${NM} -n ${1} | scripts/kallsyms ${kallsymopt} > ${2}
+ }
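
Both kernel hunks rely on the uncompressed kallsyms_names layout, where each entry is a length byte, a type character, then the raw symbol bytes; a sketch of that lookup (the symbol and type are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* one entry: length 13 = 1 type char + 12 name bytes */
        const unsigned char names[] = { 13, 't', 's','t','a','r','t','_',
                                        'k','e','r','n','e','l' };
        const unsigned char *data = names;
        unsigned int len = *data++;         /* data now points at the type */
        char result[32];

        memcpy(result, data + 1, len - 1);  /* expansion is a plain copy */
        result[len - 1] = '\0';
        printf("type=%c name=%s\n", *data, result); /* type=t name=start_kernel */
        return 0;
    }
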
diff --git a/target/linux/generic/pending-5.10/205-backtrace_module_info.patch b/target/linux/generic/pending-5.10/205-backtrace_module_info.patch
new file mode 100644
index 0000000000..5953214757
--- /dev/null
+++ b/target/linux/generic/pending-5.10/205-backtrace_module_info.patch
@@ -0,0 +1,41 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: when KALLSYMS is disabled, print module address + size for matching backtrace entries
+
+[john@phrozen.org: felix will add this to his upstream queue]
+
+lede-commit 53827cdc824556cda910b23ce5030c363b8f1461
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ lib/vsprintf.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -957,8 +957,10 @@ char *symbol_string(char *buf, char *end
+ struct printf_spec spec, const char *fmt)
+ {
+ unsigned long value;
+-#ifdef CONFIG_KALLSYMS
+ char sym[KSYM_SYMBOL_LEN];
++#ifndef CONFIG_KALLSYMS
++ struct module *mod;
++ int len;
+ #endif
+
+ if (fmt[1] == 'R')
+@@ -975,8 +977,14 @@ char *symbol_string(char *buf, char *end
+
+ return string_nocheck(buf, end, sym, spec);
+ #else
+- return special_hex_number(buf, end, value, sizeof(void *));
++ len = snprintf(sym, sizeof(sym), "0x%lx", value);
++ mod = __module_address(value);
++ if (mod)
++ snprintf(sym + len, sizeof(sym) - len, " [%s@%p+0x%x]",
++ mod->name, mod->core_layout.base,
++ mod->core_layout.size);
+ #endif
++ return string(buf, end, sym, spec);
+ }
+
+ static const struct printf_spec default_str_spec = {
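
The fallback format built by the snprintf() calls above can be reproduced standalone; every value below is made up, but the shape matches what a backtrace entry prints without KALLSYMS:

    #include <stdio.h>

    int main(void)
    {
        char sym[64];
        unsigned long value = 0x87104abcUL;       /* hypothetical address */
        void *base = (void *)0x87100000UL;        /* module load base */
        unsigned int size = 0x4000;               /* module core size */
        int len = snprintf(sym, sizeof(sym), "0x%lx", value);

        snprintf(sym + len, sizeof(sym) - len, " [%s@%p+0x%x]",
                 "mymod", base, size);
        /* prints: 0x87104abc [mymod@0x87100000+0x4000] */
        printf("%s\n", sym);
        return 0;
    }
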
diff --git a/target/linux/generic/pending-5.10/240-remove-unsane-filenames-from-deps_initramfs-list.patch b/target/linux/generic/pending-5.10/240-remove-unsane-filenames-from-deps_initramfs-list.patch
new file mode 100644
index 0000000000..29cfade716
--- /dev/null
+++ b/target/linux/generic/pending-5.10/240-remove-unsane-filenames-from-deps_initramfs-list.patch
@@ -0,0 +1,30 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: usr: sanitize deps_initramfs list
+
+If any filename in the initramfs dependency
+list contains a colon, that causes a kernel
+build error like this:
+
+/devel/openwrt/build_dir/linux-ar71xx_generic/linux-3.6.6/usr/Makefile:58: *** multiple target patterns. Stop.
+make[5]: *** [usr] Error 2
+
+Fix it by removing such filenames from the
+deps_initramfs list.
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ usr/Makefile | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/usr/Makefile
++++ b/usr/Makefile
+@@ -61,6 +61,8 @@ hostprogs := gen_init_cpio
+ # The dependency list is generated by gen_initramfs.sh -l
+ -include $(obj)/.initramfs_data.cpio.d
+
++deps_initramfs := $(foreach v,$(deps_initramfs),$(if $(findstring :,$(v)),,$(v)))
++
+ # do not try to update files included in initramfs
+ $(deps_initramfs): ;
+
diff --git a/target/linux/generic/pending-5.10/261-enable_wilink_platform_without_drivers.patch b/target/linux/generic/pending-5.10/261-enable_wilink_platform_without_drivers.patch
new file mode 100644
index 0000000000..cd31f9d934
--- /dev/null
+++ b/target/linux/generic/pending-5.10/261-enable_wilink_platform_without_drivers.patch
@@ -0,0 +1,20 @@
+From: Imre Kaloz <kaloz@openwrt.org>
+Subject: [PATCH] hack: net: wireless: make the wl12xx glue code available with
+ compat-wireless, too
+
+Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
+---
+ drivers/net/wireless/ti/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ti/Kconfig
++++ b/drivers/net/wireless/ti/Kconfig
+@@ -20,7 +20,7 @@ source "drivers/net/wireless/ti/wlcore/K
+
+ config WILINK_PLATFORM_DATA
+ bool "TI WiLink platform data"
+- depends on WLCORE_SDIO || WL1251_SDIO
++ depends on WLCORE_SDIO || WL1251_SDIO || ARCH_OMAP2PLUS
+ default y
+ help
+ Small platform data bit needed to pass data to the sdio modules.
diff --git a/target/linux/generic/pending-5.10/270-platform-mikrotik-build-bits.patch b/target/linux/generic/pending-5.10/270-platform-mikrotik-build-bits.patch
new file mode 100644
index 0000000000..df738ef97b
--- /dev/null
+++ b/target/linux/generic/pending-5.10/270-platform-mikrotik-build-bits.patch
@@ -0,0 +1,31 @@
+From c2deb5ef01a0ef09088832744cbace9e239a6ee0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thibaut=20VAR=C3=88NE?= <hacks@slashdirt.org>
+Date: Sat, 28 Mar 2020 12:11:50 +0100
+Subject: [PATCH] generic: platform/mikrotik build bits (5.4)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch adds platform/mikrotik kernel build bits
+
+Signed-off-by: Thibaut VARÈNE <hacks@slashdirt.org>
+---
+ drivers/platform/Kconfig | 2 ++
+ drivers/platform/Makefile | 1 +
+ 2 files changed, 3 insertions(+)
+
+--- a/drivers/platform/Kconfig
++++ b/drivers/platform/Kconfig
+@@ -13,3 +13,5 @@ source "drivers/platform/chrome/Kconfig"
+ source "drivers/platform/mellanox/Kconfig"
+
+ source "drivers/platform/olpc/Kconfig"
++
++source "drivers/platform/mikrotik/Kconfig"
+--- a/drivers/platform/Makefile
++++ b/drivers/platform/Makefile
+@@ -9,3 +9,4 @@ obj-$(CONFIG_MIPS) += mips/
+ obj-$(CONFIG_OLPC_EC) += olpc/
+ obj-$(CONFIG_GOLDFISH) += goldfish/
+ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
++obj-$(CONFIG_MIKROTIK) += mikrotik/
diff --git a/target/linux/generic/pending-5.10/300-mips_expose_boot_raw.patch b/target/linux/generic/pending-5.10/300-mips_expose_boot_raw.patch
new file mode 100644
index 0000000000..a454733e66
--- /dev/null
+++ b/target/linux/generic/pending-5.10/300-mips_expose_boot_raw.patch
@@ -0,0 +1,40 @@
+From: Mark Miller <mark@mirell.org>
+Subject: mips: expose CONFIG_BOOT_RAW
+
+This exposes the CONFIG_BOOT_RAW symbol in Kconfig. This is needed on
+certain Broadcom chipsets running CFE in order to load the kernel.
+
+Signed-off-by: Mark Miller <mark@mirell.org>
+Acked-by: Rob Landley <rob@landley.net>
+---
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1077,9 +1077,6 @@ config FW_ARC
+ config ARCH_MAY_HAVE_PC_FDC
+ bool
+
+-config BOOT_RAW
+- bool
+-
+ config CEVT_BCM1480
+ bool
+
+@@ -3169,6 +3166,18 @@ choice
+ bool "Extend builtin kernel arguments with bootloader arguments"
+ endchoice
+
++config BOOT_RAW
++ bool "Enable the kernel to be executed from the load address"
++ default n
++ help
++ Allow the kernel to be executed from the load address for
++ bootloaders which cannot read the ELF format. This places
++ a jump to start_kernel at the load address.
++
++ If unsure, say N.
++
++
++
+ endmenu
+
+ config LOCKDEP_SUPPORT
diff --git a/target/linux/generic/pending-5.10/302-mips_no_branch_likely.patch b/target/linux/generic/pending-5.10/302-mips_no_branch_likely.patch
new file mode 100644
index 0000000000..271923fca8
--- /dev/null
+++ b/target/linux/generic/pending-5.10/302-mips_no_branch_likely.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: mips: use -mno-branch-likely for kernel and userspace
+
+saves ~11k kernel size after lzma and ~12k squashfs size in the
+
+lede-commit: 41a039f46450ffae9483d6216422098669da2900
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -95,7 +95,7 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
+ # machines may also. Since BFD is incredibly buggy with respect to
+ # crossformat linking we rely on the elf2ecoff tool for format conversion.
+ #
+-cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
++cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
+ cflags-y += -msoft-float
+ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+ KBUILD_AFLAGS_MODULE += -mlong-calls
diff --git a/target/linux/generic/pending-5.10/305-mips_module_reloc.patch b/target/linux/generic/pending-5.10/305-mips_module_reloc.patch
new file mode 100644
index 0000000000..839ba24293
--- /dev/null
+++ b/target/linux/generic/pending-5.10/305-mips_module_reloc.patch
@@ -0,0 +1,371 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: mips: replace -mlong-calls with -mno-long-calls to make function calls faster in kernel modules to achieve this, try to
+
+lede-commit: 3b3d64743ba2a874df9d70cd19e242205b0a788c
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/Makefile | 5 +
+ arch/mips/include/asm/module.h | 5 +
+ arch/mips/kernel/module.c | 279 ++++++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 284 insertions(+), 5 deletions(-)
+
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -98,8 +98,18 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
+ cflags-y += -G 0 -mno-abicalls -fno-pic -pipe -mno-branch-likely
+ cflags-y += -msoft-float
+ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
++ifdef CONFIG_64BIT
+ KBUILD_AFLAGS_MODULE += -mlong-calls
+ KBUILD_CFLAGS_MODULE += -mlong-calls
++else
++ ifdef CONFIG_DYNAMIC_FTRACE
++ KBUILD_AFLAGS_MODULE += -mlong-calls
++ KBUILD_CFLAGS_MODULE += -mlong-calls
++ else
++ KBUILD_AFLAGS_MODULE += -mno-long-calls
++ KBUILD_CFLAGS_MODULE += -mno-long-calls
++ endif
++endif
+
+ ifeq ($(CONFIG_RELOCATABLE),y)
+ LDFLAGS_vmlinux += --emit-relocs
+--- a/arch/mips/include/asm/module.h
++++ b/arch/mips/include/asm/module.h
+@@ -12,6 +12,11 @@ struct mod_arch_specific {
+ const struct exception_table_entry *dbe_start;
+ const struct exception_table_entry *dbe_end;
+ struct mips_hi16 *r_mips_hi16_list;
++
++ void *phys_plt_tbl;
++ void *virt_plt_tbl;
++ unsigned int phys_plt_offset;
++ unsigned int virt_plt_offset;
+ };
+
+ typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
+--- a/arch/mips/kernel/module.c
++++ b/arch/mips/kernel/module.c
+@@ -31,14 +31,221 @@ struct mips_hi16 {
+ static LIST_HEAD(dbe_list);
+ static DEFINE_SPINLOCK(dbe_lock);
+
+-#ifdef MODULE_START
++/*
++ * Get the potential max trampolines size required of the init and
++ * non-init sections. Only used if we cannot find enough contiguous
++ * physically mapped memory to put the module into.
++ */
++static unsigned int
++get_plt_size(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
++ const char *secstrings, unsigned int symindex, bool is_init)
++{
++ unsigned long ret = 0;
++ unsigned int i, j;
++ Elf_Sym *syms;
++
++ /* Everything marked ALLOC (this includes the exported symbols) */
++ for (i = 1; i < hdr->e_shnum; ++i) {
++ unsigned int info = sechdrs[i].sh_info;
++
++ if (sechdrs[i].sh_type != SHT_REL
++ && sechdrs[i].sh_type != SHT_RELA)
++ continue;
++
++ /* Not a valid relocation section? */
++ if (info >= hdr->e_shnum)
++ continue;
++
++ /* Don't bother with non-allocated sections */
++ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
++ continue;
++
++ /* If it's called *.init*, and we're not init, we're
++ not interested */
++ if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
++ != is_init)
++ continue;
++
++ syms = (Elf_Sym *) sechdrs[symindex].sh_addr;
++ if (sechdrs[i].sh_type == SHT_REL) {
++ Elf_Mips_Rel *rel = (void *) sechdrs[i].sh_addr;
++ unsigned int size = sechdrs[i].sh_size / sizeof(*rel);
++
++ for (j = 0; j < size; ++j) {
++ Elf_Sym *sym;
++
++ if (ELF_MIPS_R_TYPE(rel[j]) != R_MIPS_26)
++ continue;
++
++ sym = syms + ELF_MIPS_R_SYM(rel[j]);
++ if (!is_init && sym->st_shndx != SHN_UNDEF)
++ continue;
++
++ ret += 4 * sizeof(int);
++ }
++ } else {
++ Elf_Mips_Rela *rela = (void *) sechdrs[i].sh_addr;
++ unsigned int size = sechdrs[i].sh_size / sizeof(*rela);
++
++ for (j = 0; j < size; ++j) {
++ Elf_Sym *sym;
++
++ if (ELF_MIPS_R_TYPE(rela[j]) != R_MIPS_26)
++ continue;
++
++ sym = syms + ELF_MIPS_R_SYM(rela[j]);
++ if (!is_init && sym->st_shndx != SHN_UNDEF)
++ continue;
++
++ ret += 4 * sizeof(int);
++ }
++ }
++ }
++
++ return ret;
++}
++
++#ifndef MODULE_START
++static void *alloc_phys(unsigned long size)
++{
++ unsigned order;
++ struct page *page;
++ struct page *p;
++
++ size = PAGE_ALIGN(size);
++ order = get_order(size);
++
++ page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN |
++ __GFP_THISNODE, order);
++ if (!page)
++ return NULL;
++
++ split_page(page, order);
++
++ /* mark all pages except for the last one */
++ for (p = page; p + 1 < page + (size >> PAGE_SHIFT); ++p)
++ set_bit(PG_owner_priv_1, &p->flags);
++
++ for (p = page + (size >> PAGE_SHIFT); p < page + (1 << order); ++p)
++ __free_page(p);
++
++ return page_address(page);
++}
++#endif
++
++static void free_phys(void *ptr)
++{
++ struct page *page;
++ bool free;
++
++ page = virt_to_page(ptr);
++ do {
++ free = test_and_clear_bit(PG_owner_priv_1, &page->flags);
++ __free_page(page);
++ page++;
++ } while (free);
++}
++
++
+ void *module_alloc(unsigned long size)
+ {
++#ifdef MODULE_START
+ return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
++#else
++ void *ptr;
++
++ if (size == 0)
++ return NULL;
++
++ ptr = alloc_phys(size);
++
++ /* If we failed to allocate physically contiguous memory,
++ * fall back to regular vmalloc. The module loader code will
++ * create jump tables to handle long jumps */
++ if (!ptr)
++ return vmalloc(size);
++
++ return ptr;
++#endif
+ }
++
++static inline bool is_phys_addr(void *ptr)
++{
++#ifdef CONFIG_64BIT
++ return (KSEGX((unsigned long)ptr) == CKSEG0);
++#else
++ return (KSEGX(ptr) == KSEG0);
+ #endif
++}
++
++/* Free memory returned from module_alloc */
++void module_memfree(void *module_region)
++{
++ if (is_phys_addr(module_region))
++ free_phys(module_region);
++ else
++ vfree(module_region);
++}
++
++static void *__module_alloc(int size, bool phys)
++{
++ void *ptr;
++
++ if (phys)
++ ptr = kmalloc(size, GFP_KERNEL);
++ else
++ ptr = vmalloc(size);
++ return ptr;
++}
++
++static void __module_free(void *ptr)
++{
++ if (is_phys_addr(ptr))
++ kfree(ptr);
++ else
++ vfree(ptr);
++}
++
++int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
++ char *secstrings, struct module *mod)
++{
++ unsigned int symindex = 0;
++ unsigned int core_size, init_size;
++ int i;
++
++ mod->arch.phys_plt_offset = 0;
++ mod->arch.virt_plt_offset = 0;
++ mod->arch.phys_plt_tbl = NULL;
++ mod->arch.virt_plt_tbl = NULL;
++
++ if (IS_ENABLED(CONFIG_64BIT))
++ return 0;
++
++ for (i = 1; i < hdr->e_shnum; i++)
++ if (sechdrs[i].sh_type == SHT_SYMTAB)
++ symindex = i;
++
++ core_size = get_plt_size(hdr, sechdrs, secstrings, symindex, false);
++ init_size = get_plt_size(hdr, sechdrs, secstrings, symindex, true);
++
++ if ((core_size + init_size) == 0)
++ return 0;
++
++ mod->arch.phys_plt_tbl = __module_alloc(core_size + init_size, 1);
++ if (!mod->arch.phys_plt_tbl)
++ return -ENOMEM;
++
++ mod->arch.virt_plt_tbl = __module_alloc(core_size + init_size, 0);
++ if (!mod->arch.virt_plt_tbl) {
++ __module_free(mod->arch.phys_plt_tbl);
++ mod->arch.phys_plt_tbl = NULL;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
+
+ static int apply_r_mips_none(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+@@ -54,9 +261,40 @@ static int apply_r_mips_32(struct module
+ return 0;
+ }
+
++static Elf_Addr add_plt_entry_to(unsigned *plt_offset,
++ void *start, Elf_Addr v)
++{
++ unsigned *tramp = start + *plt_offset;
++ *plt_offset += 4 * sizeof(int);
++
++ /* adjust carry for addiu */
++ if (v & 0x00008000)
++ v += 0x10000;
++
++ tramp[0] = 0x3c190000 | (v >> 16); /* lui t9, hi16 */
++ tramp[1] = 0x27390000 | (v & 0xffff); /* addiu t9, t9, lo16 */
++ tramp[2] = 0x03200008; /* jr t9 */
++ tramp[3] = 0x00000000; /* nop */
++
++ return (Elf_Addr) tramp;
++}
++
++static Elf_Addr add_plt_entry(struct module *me, void *location, Elf_Addr v)
++{
++ if (is_phys_addr(location))
++ return add_plt_entry_to(&me->arch.phys_plt_offset,
++ me->arch.phys_plt_tbl, v);
++ else
++ return add_plt_entry_to(&me->arch.virt_plt_offset,
++ me->arch.virt_plt_tbl, v);
++
++}
++
+ static int apply_r_mips_26(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+ {
++ u32 ofs = base & 0x03ffffff;
++
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_26 relocation\n",
+ me->name);
+@@ -64,13 +302,17 @@ static int apply_r_mips_26(struct module
+ }
+
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+- pr_err("module %s: relocation overflow\n",
+- me->name);
+- return -ENOEXEC;
++ v = add_plt_entry(me, location, v + (ofs << 2));
++ if (!v) {
++ pr_err("module %s: relocation overflow\n",
++ me->name);
++ return -ENOEXEC;
++ }
++ ofs = 0;
+ }
+
+ *location = (*location & ~0x03ffffff) |
+- ((base + (v >> 2)) & 0x03ffffff);
++ ((ofs + (v >> 2)) & 0x03ffffff);
+
+ return 0;
+ }
+@@ -446,9 +688,36 @@ int module_finalize(const Elf_Ehdr *hdr,
+ list_add(&me->arch.dbe_list, &dbe_list);
+ spin_unlock_irq(&dbe_lock);
+ }
++
++ /* Get rid of the fixup trampoline if we're running the module
++ * from physically mapped address space */
++ if (me->arch.phys_plt_offset == 0) {
++ __module_free(me->arch.phys_plt_tbl);
++ me->arch.phys_plt_tbl = NULL;
++ }
++ if (me->arch.virt_plt_offset == 0) {
++ __module_free(me->arch.virt_plt_tbl);
++ me->arch.virt_plt_tbl = NULL;
++ }
++
+ return 0;
+ }
+
++void module_arch_freeing_init(struct module *mod)
++{
++ if (mod->state == MODULE_STATE_LIVE)
++ return;
++
++ if (mod->arch.phys_plt_tbl) {
++ __module_free(mod->arch.phys_plt_tbl);
++ mod->arch.phys_plt_tbl = NULL;
++ }
++ if (mod->arch.virt_plt_tbl) {
++ __module_free(mod->arch.virt_plt_tbl);
++ mod->arch.virt_plt_tbl = NULL;
++ }
++}
++
+ void module_arch_cleanup(struct module *mod)
+ {
+ spin_lock_irq(&dbe_lock);
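A note on the trampoline encoding in add_plt_entry_to() above: addiu sign-extends its 16-bit immediate, so for targets with bit 15 set the high half loaded by lui must carry one extra. A minimal user-space sketch (illustrative only, with a hypothetical target address) showing that the lui/addiu pair round-trips correctly:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the carry adjustment in add_plt_entry_to(): lui loads the
     * (possibly incremented) high half, addiu then adds the sign-extended
     * low half, reproducing the original target. */
    static uint32_t plt_reaches(uint32_t v)
    {
        uint32_t hi;
        int16_t lo;

        if (v & 0x00008000)             /* adjust carry for addiu */
            v += 0x10000;

        hi = v >> 16;                   /* immediate of: lui t9, hi */
        lo = (int16_t)(v & 0xffff);     /* immediate of: addiu t9, t9, lo */

        return (hi << 16) + lo;         /* value of t9 before: jr t9 */
    }

    int main(void)
    {
        uint32_t v = 0x8041abcc;        /* hypothetical jump target */

        /* prints: target 0x8041abcc -> t9 = 0x8041abcc */
        printf("target 0x%08x -> t9 = 0x%08x\n", v, plt_reaches(v));
        return 0;
    }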
diff --git a/target/linux/generic/pending-5.10/307-mips_highmem_offset.patch b/target/linux/generic/pending-5.10/307-mips_highmem_offset.patch
new file mode 100644
index 0000000000..e1ada22f34
--- /dev/null
+++ b/target/linux/generic/pending-5.10/307-mips_highmem_offset.patch
@@ -0,0 +1,19 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: adjust mips highmem offset to avoid the need for -mlong-calls on systems with >256M RAM
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/include/asm/mach-generic/spaces.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/include/asm/mach-generic/spaces.h
++++ b/arch/mips/include/asm/mach-generic/spaces.h
+@@ -54,7 +54,7 @@
+ * Memory above this physical address will be considered highmem.
+ */
+ #ifndef HIGHMEM_START
+-#define HIGHMEM_START _AC(0x20000000, UL)
++#define HIGHMEM_START _AC(0x10000000, UL)
+ #endif
+
+ #endif /* CONFIG_32BIT */
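The arithmetic behind the change, for context: an R_MIPS_26 jal/j can only reach targets within the same 256 MiB (0x10000000-byte) segment, since it supplies 26 bits shifted left by 2 and inherits the top four address bits from the PC. With HIGHMEM_START at 0x10000000, all lowmem is direct-mapped into 0x80000000-0x8fffffff, a single such segment, so kernel and module code presumably stay jal-reachable without -mlong-calls; RAM beyond 256 MiB is handled as highmem instead.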
diff --git a/target/linux/generic/pending-5.10/308-mips32r2_tune.patch b/target/linux/generic/pending-5.10/308-mips32r2_tune.patch
new file mode 100644
index 0000000000..54c0e01023
--- /dev/null
+++ b/target/linux/generic/pending-5.10/308-mips32r2_tune.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add -mtune=34kc to MIPS CFLAGS when building for mips32r2
+
+This provides a good tradeoff across at least 24Kc-74Kc, while also
+producing smaller code.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/mips/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -155,7 +155,7 @@ cflags-$(CONFIG_CPU_VR41XX) += -march=r4
+ cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
+ cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
+-cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
++cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -mtune=34kc -Wa,--trap
+ cflags-$(CONFIG_CPU_MIPS32_R5) += -march=mips32r5 -Wa,--trap -modd-spreg
+ cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap -modd-spreg
+ cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
diff --git a/target/linux/generic/pending-5.10/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch b/target/linux/generic/pending-5.10/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch
new file mode 100644
index 0000000000..794f027f18
--- /dev/null
+++ b/target/linux/generic/pending-5.10/309-MIPS-Add-CPU-option-reporting-to-proc-cpuinfo.patch
@@ -0,0 +1,140 @@
+From 87ec87c2ad615c1a177cd08ef5fa29fc739f6e50 Mon Sep 17 00:00:00 2001
+From: Hauke Mehrtens <hauke@hauke-m.de>
+Date: Sun, 23 Dec 2018 18:06:53 +0100
+Subject: [PATCH] MIPS: Add CPU option reporting to /proc/cpuinfo
+
+Many MIPS CPUs have optional CPU features which are not activated for
+all CPU cores. Print the CPU options which are implemented in the core
+in /proc/cpuinfo. This makes it possible to see which features are
+supported and which are not. This should cover all standard MIPS
+extensions; previously only information about the main MIPS ASEs was
+printed.
+
+Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
+---
+ arch/mips/kernel/proc.c | 116 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 116 insertions(+)
+
+--- a/arch/mips/kernel/proc.c
++++ b/arch/mips/kernel/proc.c
+@@ -138,6 +138,120 @@ static int show_cpuinfo(struct seq_file
+ seq_printf(m, "micromips kernel\t: %s\n",
+ (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
+ }
++
++ seq_printf(m, "Options implemented\t:");
++ if (cpu_has_tlb)
++ seq_printf(m, "%s", " tlb");
++ if (cpu_has_ftlb)
++ seq_printf(m, "%s", " ftlb");
++ if (cpu_has_tlbinv)
++ seq_printf(m, "%s", " tlbinv");
++ if (cpu_has_segments)
++ seq_printf(m, "%s", " segments");
++ if (cpu_has_rixiex)
++ seq_printf(m, "%s", " rixiex");
++ if (cpu_has_ldpte)
++ seq_printf(m, "%s", " ldpte");
++ if (cpu_has_maar)
++ seq_printf(m, "%s", " maar");
++ if (cpu_has_rw_llb)
++ seq_printf(m, "%s", " rw_llb");
++ if (cpu_has_4kex)
++ seq_printf(m, "%s", " 4kex");
++ if (cpu_has_3k_cache)
++ seq_printf(m, "%s", " 3k_cache");
++ if (cpu_has_4k_cache)
++ seq_printf(m, "%s", " 4k_cache");
++ if (cpu_has_6k_cache)
++ seq_printf(m, "%s", " 6k_cache");
++ if (cpu_has_8k_cache)
++ seq_printf(m, "%s", " 8k_cache");
++ if (cpu_has_tx39_cache)
++ seq_printf(m, "%s", " tx39_cache");
++ if (cpu_has_octeon_cache)
++ seq_printf(m, "%s", " octeon_cache");
++ if (cpu_has_fpu)
++ seq_printf(m, "%s", " fpu");
++ if (cpu_has_32fpr)
++ seq_printf(m, "%s", " 32fpr");
++ if (cpu_has_cache_cdex_p)
++ seq_printf(m, "%s", " cache_cdex_p");
++ if (cpu_has_cache_cdex_s)
++ seq_printf(m, "%s", " cache_cdex_s");
++ if (cpu_has_prefetch)
++ seq_printf(m, "%s", " prefetch");
++ if (cpu_has_mcheck)
++ seq_printf(m, "%s", " mcheck");
++ if (cpu_has_ejtag)
++ seq_printf(m, "%s", " ejtag");
++ if (cpu_has_llsc)
++ seq_printf(m, "%s", " llsc");
++ if (cpu_has_guestctl0ext)
++ seq_printf(m, "%s", " guestctl0ext");
++ if (cpu_has_guestctl1)
++ seq_printf(m, "%s", " guestctl1");
++ if (cpu_has_guestctl2)
++ seq_printf(m, "%s", " guestctl2");
++ if (cpu_has_guestid)
++ seq_printf(m, "%s", " guestid");
++ if (cpu_has_drg)
++ seq_printf(m, "%s", " drg");
++ if (cpu_has_rixi)
++ seq_printf(m, "%s", " rixi");
++ if (cpu_has_lpa)
++ seq_printf(m, "%s", " lpa");
++ if (cpu_has_mvh)
++ seq_printf(m, "%s", " mvh");
++ if (cpu_has_vtag_icache)
++ seq_printf(m, "%s", " vtag_icache");
++ if (cpu_has_dc_aliases)
++ seq_printf(m, "%s", " dc_aliases");
++ if (cpu_has_ic_fills_f_dc)
++ seq_printf(m, "%s", " ic_fills_f_dc");
++ if (cpu_has_pindexed_dcache)
++ seq_printf(m, "%s", " pindexed_dcache");
++ if (cpu_has_userlocal)
++ seq_printf(m, "%s", " userlocal");
++ if (cpu_has_nofpuex)
++ seq_printf(m, "%s", " nofpuex");
++ if (cpu_has_vint)
++ seq_printf(m, "%s", " vint");
++ if (cpu_has_veic)
++ seq_printf(m, "%s", " veic");
++ if (cpu_has_inclusive_pcaches)
++ seq_printf(m, "%s", " inclusive_pcaches");
++ if (cpu_has_perf_cntr_intr_bit)
++ seq_printf(m, "%s", " perf_cntr_intr_bit");
++ if (cpu_has_ufr)
++ seq_printf(m, "%s", " ufr");
++ if (cpu_has_fre)
++ seq_printf(m, "%s", " fre");
++ if (cpu_has_cdmm)
++ seq_printf(m, "%s", " cdmm");
++ if (cpu_has_small_pages)
++ seq_printf(m, "%s", " small_pages");
++ if (cpu_has_nan_legacy)
++ seq_printf(m, "%s", " nan_legacy");
++ if (cpu_has_nan_2008)
++ seq_printf(m, "%s", " nan_2008");
++ if (cpu_has_ebase_wg)
++ seq_printf(m, "%s", " ebase_wg");
++ if (cpu_has_badinstr)
++ seq_printf(m, "%s", " badinstr");
++ if (cpu_has_badinstrp)
++ seq_printf(m, "%s", " badinstrp");
++ if (cpu_has_contextconfig)
++ seq_printf(m, "%s", " contextconfig");
++ if (cpu_has_perf)
++ seq_printf(m, "%s", " perf");
++ if (cpu_has_shared_ftlb_ram)
++ seq_printf(m, "%s", " shared_ftlb_ram");
++ if (cpu_has_shared_ftlb_entries)
++ seq_printf(m, "%s", " shared_ftlb_entries");
++ if (cpu_has_mipsmt_pertccounters)
++ seq_printf(m, "%s", " mipsmt_pertccounters");
++ seq_printf(m, "\n");
++
+ seq_printf(m, "shadow register sets\t: %d\n",
+ cpu_data[n].srsets);
+ seq_printf(m, "kscratch registers\t: %d\n",
diff --git a/target/linux/generic/pending-5.10/310-arm_module_unresolved_weak_sym.patch b/target/linux/generic/pending-5.10/310-arm_module_unresolved_weak_sym.patch
new file mode 100644
index 0000000000..191dc6ac3c
--- /dev/null
+++ b/target/linux/generic/pending-5.10/310-arm_module_unresolved_weak_sym.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: fix errors in unresolved weak symbols on arm
+
+lede-commit: 570699d4838a907c3ef9f2819bf19eb72997b32f
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ arch/arm/kernel/module.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -105,6 +105,10 @@ apply_relocate(Elf32_Shdr *sechdrs, cons
+ return -ENOEXEC;
+ }
+
++ if ((IS_ERR_VALUE(sym->st_value) || !sym->st_value) &&
++ ELF_ST_BIND(sym->st_info) == STB_WEAK)
++ continue;
++
+ loc = dstsec->sh_addr + rel->r_offset;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
diff --git a/target/linux/generic/pending-5.10/311-MIPS-zboot-put-appended-dtb-into-a-section.patch b/target/linux/generic/pending-5.10/311-MIPS-zboot-put-appended-dtb-into-a-section.patch
new file mode 100644
index 0000000000..3f8808f702
--- /dev/null
+++ b/target/linux/generic/pending-5.10/311-MIPS-zboot-put-appended-dtb-into-a-section.patch
@@ -0,0 +1,36 @@
+From 7d1531c81c0fb4c93bea8dc316043ad0e4d0c270 Mon Sep 17 00:00:00 2001
+From: Chuanhong Guo <gch981213@gmail.com>
+Date: Sun, 25 Oct 2020 23:19:40 +0800
+Subject: [PATCH] MIPS: zboot: put appended dtb into a section
+
+This will make a separated section for dtb appear in ELF, and we can
+then use objcopy to patch a dtb into vmlinuz when RAW_APPENDED_DTB
+is set in kernel config.
+
+command to patch a dtb:
+objcopy --set-section-flags=.appended_dtb=alloc,contents \
+ --update-section=.appended_dtb=<target>.dtb vmlinuz vmlinuz-dtb
+
+Signed-off-by: Chuanhong Guo <gch981213@gmail.com>
+---
+ arch/mips/boot/compressed/ld.script | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/mips/boot/compressed/ld.script
++++ b/arch/mips/boot/compressed/ld.script
+@@ -31,9 +31,12 @@ SECTIONS
+ CONSTRUCTORS
+ . = ALIGN(16);
+ }
+- __appended_dtb = .;
+- /* leave space for appended DTB */
+- . += 0x100000;
++
++ .appended_dtb : {
++ __appended_dtb = .;
++ /* leave space for appended DTB */
++ . += 0x100000;
++ }
+
+ _edata = .;
+ /* End of data section */
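Before patching in a DTB, the reserved section can be verified in the build output with objdump (the toolchain prefix is an example):

    mips-openwrt-linux-objdump -h vmlinuz | grep appended_dtb

It should report a 0x100000-byte .appended_dtb section, matching the space reserved in the linker script above.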
diff --git a/target/linux/generic/pending-5.10/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch b/target/linux/generic/pending-5.10/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch
new file mode 100644
index 0000000000..5a0e44b76b
--- /dev/null
+++ b/target/linux/generic/pending-5.10/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch
@@ -0,0 +1,281 @@
+From: Yousong Zhou <yszhou4tech@gmail.com>
+Subject: MIPS: kexec: Accept command line parameters from userspace.
+
+Signed-off-by: Yousong Zhou <yszhou4tech@gmail.com>
+---
+ arch/mips/kernel/machine_kexec.c | 153 +++++++++++++++++++++++++++++++-----
+ arch/mips/kernel/machine_kexec.h | 20 +++++
+ arch/mips/kernel/relocate_kernel.S | 21 +++--
+ 3 files changed, 167 insertions(+), 27 deletions(-)
+ create mode 100644 arch/mips/kernel/machine_kexec.h
+
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -9,14 +9,11 @@
+ #include <linux/delay.h>
+ #include <linux/libfdt.h>
+
++#include <asm/bootinfo.h>
+ #include <asm/cacheflush.h>
+ #include <asm/page.h>
+-
+-extern const unsigned char relocate_new_kernel[];
+-extern const size_t relocate_new_kernel_size;
+-
+-extern unsigned long kexec_start_address;
+-extern unsigned long kexec_indirection_page;
++#include <linux/uaccess.h>
++#include "machine_kexec.h"
+
+ static unsigned long reboot_code_buffer;
+
+@@ -30,6 +27,101 @@ void (*_crash_smp_send_stop)(void) = NUL
+ void (*_machine_kexec_shutdown)(void) = NULL;
+ void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+
++static void machine_kexec_print_args(void)
++{
++ unsigned long argc = (int)kexec_args[0];
++ int i;
++
++ pr_info("kexec_args[0] (argc): %lu\n", argc);
++ pr_info("kexec_args[1] (argv): %p\n", (void *)kexec_args[1]);
++ pr_info("kexec_args[2] (env ): %p\n", (void *)kexec_args[2]);
++ pr_info("kexec_args[3] (desc): %p\n", (void *)kexec_args[3]);
++
++ for (i = 0; i < argc; i++) {
++ pr_info("kexec_argv[%d] = %p, %s\n",
++ i, kexec_argv[i], kexec_argv[i]);
++ }
++}
++
++static void machine_kexec_init_argv(struct kimage *image)
++{
++ void __user *buf = NULL;
++ size_t bufsz;
++ size_t size;
++ int i;
++
++ bufsz = 0;
++ for (i = 0; i < image->nr_segments; i++) {
++ struct kexec_segment *seg;
++
++ seg = &image->segment[i];
++ if (seg->bufsz < 6)
++ continue;
++
++ if (strncmp((char *) seg->buf, "kexec ", 6))
++ continue;
++
++ buf = seg->buf;
++ bufsz = seg->bufsz;
++ break;
++ }
++
++ if (!buf)
++ return;
++
++ size = KEXEC_COMMAND_LINE_SIZE;
++ size = min(size, bufsz);
++ if (size < bufsz)
++ pr_warn("kexec command line truncated to %zd bytes\n", size);
++
++ /* Copy to kernel space */
++ if (copy_from_user(kexec_argv_buf, buf, size))
++ pr_warn("kexec command line copy to kernel space failed\n");
++
++ kexec_argv_buf[size - 1] = 0;
++}
++
++static void machine_kexec_parse_argv(struct kimage *image)
++{
++ char *reboot_code_buffer;
++ int reloc_delta;
++ char *ptr;
++ int argc;
++ int i;
++
++ ptr = kexec_argv_buf;
++ argc = 0;
++
++ /*
++ * convert command line string to array of parameters
++ * (as bootloader does).
++ */
++ while (ptr && *ptr && (KEXEC_MAX_ARGC > argc)) {
++ if (*ptr == ' ') {
++ *ptr++ = '\0';
++ continue;
++ }
++
++ kexec_argv[argc++] = ptr;
++ ptr = strchr(ptr, ' ');
++ }
++
++ if (!argc)
++ return;
++
++ kexec_args[0] = argc;
++ kexec_args[1] = (unsigned long)kexec_argv;
++ kexec_args[2] = 0;
++ kexec_args[3] = 0;
++
++ reboot_code_buffer = page_address(image->control_code_page);
++ reloc_delta = reboot_code_buffer - (char *)kexec_relocate_new_kernel;
++
++ kexec_args[1] += reloc_delta;
++ for (i = 0; i < argc; i++)
++ kexec_argv[i] += reloc_delta;
++}
++
+ static void kexec_image_info(const struct kimage *kimage)
+ {
+ unsigned long i;
+@@ -99,6 +191,18 @@ machine_kexec_prepare(struct kimage *kim
+ #endif
+
+ kexec_image_info(kimage);
++ /*
++ * Init the arguments with the original firmware values first, so
++ * that booting can still succeed whenever no usable command line is
++ * passed from kexec-tools.
++ */
++
++ kexec_args[0] = fw_arg0;
++ kexec_args[1] = fw_arg1;
++ kexec_args[2] = fw_arg2;
++ kexec_args[3] = fw_arg3;
++
++ machine_kexec_init_argv(kimage);
++ machine_kexec_parse_argv(kimage);
+
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
+@@ -161,7 +265,7 @@ machine_crash_shutdown(struct pt_regs *r
+ void kexec_nonboot_cpu_jump(void)
+ {
+ local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
+- reboot_code_buffer + relocate_new_kernel_size);
++ reboot_code_buffer + KEXEC_RELOCATE_NEW_KERNEL_SIZE);
+
+ relocated_kexec_smp_wait(NULL);
+ }
+@@ -199,7 +303,7 @@ void kexec_reboot(void)
+ * machine_kexec() CPU.
+ */
+ local_flush_icache_range(reboot_code_buffer,
+- reboot_code_buffer + relocate_new_kernel_size);
++ reboot_code_buffer + KEXEC_RELOCATE_NEW_KERNEL_SIZE);
+
+ do_kexec = (void *)reboot_code_buffer;
+ do_kexec();
+@@ -212,10 +316,12 @@ machine_kexec(struct kimage *image)
+ unsigned long *ptr;
+
+ reboot_code_buffer =
+- (unsigned long)page_address(image->control_code_page);
++ (unsigned long)page_address(image->control_code_page);
++ pr_info("reboot_code_buffer = %p\n", (void *)reboot_code_buffer);
+
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
++ pr_info("kexec_start_address = %p\n", (void *)kexec_start_address);
+
+ if (image->type == KEXEC_TYPE_DEFAULT) {
+ kexec_indirection_page =
+@@ -223,9 +329,19 @@ machine_kexec(struct kimage *image)
+ } else {
+ kexec_indirection_page = (unsigned long)&image->head;
+ }
++ pr_info("kexec_indirection_page = %p\n", (void *)kexec_indirection_page);
+
+- memcpy((void*)reboot_code_buffer, relocate_new_kernel,
+- relocate_new_kernel_size);
++ pr_info("Where is memcpy: %p\n", memcpy);
++ pr_info("kexec_relocate_new_kernel = %p, kexec_relocate_new_kernel_end = %p\n",
++ (void *)kexec_relocate_new_kernel, &kexec_relocate_new_kernel_end);
++ pr_info("Copy %lu bytes from %p to %p\n", KEXEC_RELOCATE_NEW_KERNEL_SIZE,
++ (void *)kexec_relocate_new_kernel, (void *)reboot_code_buffer);
++ memcpy((void*)reboot_code_buffer, kexec_relocate_new_kernel,
++ KEXEC_RELOCATE_NEW_KERNEL_SIZE);
++
++ pr_info("Before _print_args().\n");
++ machine_kexec_print_args();
++ pr_info("Before eval loop.\n");
+
+ /*
+ * The generic kexec code builds a page list with physical
+@@ -256,7 +372,7 @@ machine_kexec(struct kimage *image)
+ #ifdef CONFIG_SMP
+ /* All secondary cpus now may jump to kexec_wait cycle */
+ relocated_kexec_smp_wait = reboot_code_buffer +
+- (void *)(kexec_smp_wait - relocate_new_kernel);
++ (void *)(kexec_smp_wait - kexec_relocate_new_kernel);
+ smp_wmb();
+ atomic_set(&kexec_ready_to_reboot, 1);
+ #endif
+--- /dev/null
++++ b/arch/mips/kernel/machine_kexec.h
+@@ -0,0 +1,20 @@
++#ifndef _MACHINE_KEXEC_H
++#define _MACHINE_KEXEC_H
++
++#ifndef __ASSEMBLY__
++extern const unsigned char kexec_relocate_new_kernel[];
++extern unsigned long kexec_relocate_new_kernel_end;
++extern unsigned long kexec_start_address;
++extern unsigned long kexec_indirection_page;
++
++extern char kexec_argv_buf[];
++extern char *kexec_argv[];
++
++#define KEXEC_RELOCATE_NEW_KERNEL_SIZE ((unsigned long)&kexec_relocate_new_kernel_end - (unsigned long)kexec_relocate_new_kernel)
++#endif /* !__ASSEMBLY__ */
++
++#define KEXEC_COMMAND_LINE_SIZE 256
++#define KEXEC_ARGV_SIZE (KEXEC_COMMAND_LINE_SIZE / 16)
++#define KEXEC_MAX_ARGC (KEXEC_ARGV_SIZE / sizeof(long))
++
++#endif
+--- a/arch/mips/kernel/relocate_kernel.S
++++ b/arch/mips/kernel/relocate_kernel.S
+@@ -10,8 +10,9 @@
+ #include <asm/mipsregs.h>
+ #include <asm/stackframe.h>
+ #include <asm/addrspace.h>
++#include "machine_kexec.h"
+
+-LEAF(relocate_new_kernel)
++LEAF(kexec_relocate_new_kernel)
+ PTR_L a0, arg0
+ PTR_L a1, arg1
+ PTR_L a2, arg2
+@@ -96,7 +97,7 @@ done:
+ #endif
+ /* jump to kexec_start_address */
+ j s1
+- END(relocate_new_kernel)
++ END(kexec_relocate_new_kernel)
+
+ #ifdef CONFIG_SMP
+ /*
+@@ -182,9 +183,15 @@ kexec_indirection_page:
+ PTR 0
+ .size kexec_indirection_page, PTRSIZE
+
+-relocate_new_kernel_end:
++kexec_argv_buf:
++ EXPORT(kexec_argv_buf)
++ .skip KEXEC_COMMAND_LINE_SIZE
++ .size kexec_argv_buf, KEXEC_COMMAND_LINE_SIZE
+
+-relocate_new_kernel_size:
+- EXPORT(relocate_new_kernel_size)
+- PTR relocate_new_kernel_end - relocate_new_kernel
+- .size relocate_new_kernel_size, PTRSIZE
++kexec_argv:
++ EXPORT(kexec_argv)
++ .skip KEXEC_ARGV_SIZE
++ .size kexec_argv, KEXEC_ARGV_SIZE
++
++kexec_relocate_new_kernel_end:
++ EXPORT(kexec_relocate_new_kernel_end)
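Typical usage with a kexec-tools build that supports this (device paths are examples): the command line ends up in a segment starting with the literal "kexec " prefix, which machine_kexec_init_argv() above picks out and machine_kexec_parse_argv() splits into an argv array for the new kernel:

    kexec -l /tmp/vmlinux --command-line="console=ttyS0,115200 root=/dev/mtdblock2"
    kexec -e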
diff --git a/target/linux/generic/pending-5.10/332-arc-add-OWRTDTB-section.patch b/target/linux/generic/pending-5.10/332-arc-add-OWRTDTB-section.patch
new file mode 100644
index 0000000000..30158cf399
--- /dev/null
+++ b/target/linux/generic/pending-5.10/332-arc-add-OWRTDTB-section.patch
@@ -0,0 +1,84 @@
+From bb0c3b0175240bf152fd7c644821a0cf9f77c37c Mon Sep 17 00:00:00 2001
+From: Evgeniy Didin <Evgeniy.Didin@synopsys.com>
+Date: Fri, 15 Mar 2019 18:53:38 +0300
+Subject: [PATCH] arc add OWRTDTB section
+
+This change allows OpenWRT to patch resulting kernel binary with
+external .dtb.
+
+That allows us to re-use exactly the same vmlinux on different boards,
+given their ARC core configurations match (at least cache line sizes etc).
+
+"patch-dtb" searches for the ASCII "OWRTDTB:" string and copies the external
+.dtb right after it, keeping the string in place.
+
+Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+Signed-off-by: Evgeniy Didin <Evgeniy.Didin@synopsys.com>
+---
+ arch/arc/kernel/head.S | 10 ++++++++++
+ arch/arc/kernel/setup.c | 4 +++-
+ arch/arc/kernel/vmlinux.lds.S | 13 +++++++++++++
+ 3 files changed, 26 insertions(+), 1 deletion(-)
+
+--- a/arch/arc/kernel/head.S
++++ b/arch/arc/kernel/head.S
+@@ -88,6 +88,16 @@
+ DSP_EARLY_INIT
+ .endm
+
++ ; Here "patch-dtb" will embed external .dtb
++ ; Note "patch-dtb" searches for ASCII "OWRTDTB:" string
++ ; and pastes .dtb right after it, hense the string precedes
++ ; __image_dtb symbol.
++ .section .owrt, "aw",@progbits
++ .ascii "OWRTDTB:"
++ENTRY(__image_dtb)
++ .fill 0x4000
++END(__image_dtb)
++
+ .section .init.text, "ax",@progbits
+
+ ;----------------------------------------------------------------
+--- a/arch/arc/kernel/setup.c
++++ b/arch/arc/kernel/setup.c
+@@ -495,6 +495,8 @@ static inline bool uboot_arg_invalid(uns
+ /* We always pass 0 as magic from U-boot */
+ #define UBOOT_MAGIC_VALUE 0
+
++extern struct boot_param_header __image_dtb;
++
+ void __init handle_uboot_args(void)
+ {
+ bool use_embedded_dtb = true;
+@@ -533,7 +535,7 @@ void __init handle_uboot_args(void)
+ ignore_uboot_args:
+
+ if (use_embedded_dtb) {
+- machine_desc = setup_machine_fdt(__dtb_start);
++ machine_desc = setup_machine_fdt(&__image_dtb);
+ if (!machine_desc)
+ panic("Embedded DT invalid\n");
+ }
+--- a/arch/arc/kernel/vmlinux.lds.S
++++ b/arch/arc/kernel/vmlinux.lds.S
+@@ -27,6 +27,19 @@ SECTIONS
+
+ . = CONFIG_LINUX_LINK_BASE;
+
++ /*
++ * In OpenWrt we want to patch the built binary, embedding the .dtb of
++ * choice. This is implemented with the "patch-dtb" utility, which
++ * searches for the "OWRTDTB:" string in the first 16k of the image
++ * and, if it is found, copies the .dtb right after that string.
++ *
++ * Note: "OWRTDTB:" won't be overwritten with .dtb, .dtb will follow it.
++ */
++ .owrt : {
++ *(.owrt)
++ . = ALIGN(PAGE_SIZE);
++ }
++
+ _int_vec_base_lds = .;
+ .vector : {
+ *(.vector)
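For illustration, a patch-dtb-style host tool boils down to a search-and-overwrite. This sketch is not OpenWrt's actual utility, just the mechanism the comments above describe: find "OWRTDTB:" in the first 16k, paste the .dtb right after it, leave the marker intact:

    #define _GNU_SOURCE     /* for memmem() */
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        char head[16384], buf[4096], *p;
        size_t n;
        FILE *img, *dtb;

        if (argc != 3) {
            fprintf(stderr, "usage: %s <image> <dtb>\n", argv[0]);
            return 1;
        }

        img = fopen(argv[1], "r+b");
        dtb = fopen(argv[2], "rb");
        if (!img || !dtb)
            return 1;

        n = fread(head, 1, sizeof(head), img);
        p = memmem(head, n, "OWRTDTB:", 8);
        if (!p) {
            fprintf(stderr, "marker not found\n");
            return 1;
        }

        /* keep the marker, paste the .dtb right after it; a real tool
         * would also check the .dtb fits the 0x4000 window reserved by
         * __image_dtb above */
        fseek(img, (p - head) + 8, SEEK_SET);
        while ((n = fread(buf, 1, sizeof(buf), dtb)) > 0)
            fwrite(buf, 1, n, img);

        fclose(dtb);
        fclose(img);
        return 0;
    }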
diff --git a/target/linux/generic/pending-5.10/333-arc-enable-unaligned-access-in-kernel-mode.patch b/target/linux/generic/pending-5.10/333-arc-enable-unaligned-access-in-kernel-mode.patch
new file mode 100644
index 0000000000..1848a84cc4
--- /dev/null
+++ b/target/linux/generic/pending-5.10/333-arc-enable-unaligned-access-in-kernel-mode.patch
@@ -0,0 +1,24 @@
+From: Alexey Brodkin <abrodkin@synopsys.com>
+Subject: arc: enable unaligned access in kernel mode
+
+This enables misaligned access handling even in kernel mode.
+Some wireless drivers (ath9k-htc and mt7601u) use misaligned accesses
+here and there; to cope with that without fixing the drivers themselves,
+we just handle it gracefully on ARC.
+
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+---
+ arch/arc/kernel/unaligned.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arc/kernel/unaligned.c
++++ b/arch/arc/kernel/unaligned.c
+@@ -202,7 +202,7 @@ int misaligned_fixup(unsigned long addre
+ char buf[TASK_COMM_LEN];
+
+ /* handle user mode only and only if enabled by sysadmin */
+- if (!user_mode(regs) || !unaligned_enabled)
++ if (!unaligned_enabled)
+ return 1;
+
+ if (no_unaligned_warning) {
diff --git a/target/linux/generic/pending-5.10/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch b/target/linux/generic/pending-5.10/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch
new file mode 100644
index 0000000000..6792a66f8a
--- /dev/null
+++ b/target/linux/generic/pending-5.10/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch
@@ -0,0 +1,25 @@
+From 66770a004afe10df11d3902e16eaa0c2c39436bb Mon Sep 17 00:00:00 2001
+From: Pawel Dembicki <paweldembicki@gmail.com>
+Date: Fri, 24 May 2019 17:56:19 +0200
+Subject: [PATCH] powerpc: Enable kernel XZ compression option on PPC_85xx
+
+Enable kernel XZ compression option on PPC_85xx. Tested with
+simpleImage on TP-Link TL-WDR4900 (Freescale P1014 processor).
+
+Suggested-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
+---
+ arch/powerpc/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -214,7 +214,7 @@ config PPC
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE
+ select HAVE_KERNEL_LZO if DEFAULT_UIMAGE
+- select HAVE_KERNEL_XZ if PPC_BOOK3S || 44x
++ select HAVE_KERNEL_XZ if PPC_BOOK3S || 44x || PPC_85xx
+ select HAVE_KPROBES
+ select HAVE_KPROBES_ON_FTRACE
+ select HAVE_KRETPROBES
diff --git a/target/linux/generic/pending-5.10/400-mtd-mtdsplit-support.patch b/target/linux/generic/pending-5.10/400-mtd-mtdsplit-support.patch
new file mode 100644
index 0000000000..6f816a6ad3
--- /dev/null
+++ b/target/linux/generic/pending-5.10/400-mtd-mtdsplit-support.patch
@@ -0,0 +1,313 @@
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -12,6 +12,25 @@ menuconfig MTD
+
+ if MTD
+
++menu "OpenWrt specific MTD options"
++
++config MTD_ROOTFS_ROOT_DEV
++ bool "Automatically set 'rootfs' partition to be root filesystem"
++ default y
++
++config MTD_SPLIT_FIRMWARE
++ bool "Automatically split firmware partition for kernel+rootfs"
++ default y
++
++config MTD_SPLIT_FIRMWARE_NAME
++ string "Firmware partition name"
++ depends on MTD_SPLIT_FIRMWARE
++ default "firmware"
++
++source "drivers/mtd/mtdsplit/Kconfig"
++
++endmenu
++
+ config MTD_TESTS
+ tristate "MTD tests support (DANGEROUS)"
+ depends on m
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -15,10 +15,12 @@
+ #include <linux/kmod.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
++#include <linux/magic.h>
+ #include <linux/err.h>
+ #include <linux/of.h>
+
+ #include "mtdcore.h"
++#include "mtdsplit/mtdsplit.h"
+
+ /*
+ * MTD methods which simply translate the effective address and pass through
+@@ -236,6 +238,146 @@ static int mtd_add_partition_attrs(struc
+ return ret;
+ }
+
++static DEFINE_SPINLOCK(part_parser_lock);
++static LIST_HEAD(part_parsers);
++
++static struct mtd_part_parser *mtd_part_parser_get(const char *name)
++{
++ struct mtd_part_parser *p, *ret = NULL;
++
++ spin_lock(&part_parser_lock);
++
++ list_for_each_entry(p, &part_parsers, list)
++ if (!strcmp(p->name, name) && try_module_get(p->owner)) {
++ ret = p;
++ break;
++ }
++
++ spin_unlock(&part_parser_lock);
++
++ return ret;
++}
++
++static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
++{
++ module_put(p->owner);
++}
++
++static struct mtd_part_parser *
++get_partition_parser_by_type(enum mtd_parser_type type,
++ struct mtd_part_parser *start)
++{
++ struct mtd_part_parser *p, *ret = NULL;
++
++ spin_lock(&part_parser_lock);
++
++ p = list_prepare_entry(start, &part_parsers, list);
++ if (start)
++ mtd_part_parser_put(start);
++
++ list_for_each_entry_continue(p, &part_parsers, list) {
++ if (p->type == type && try_module_get(p->owner)) {
++ ret = p;
++ break;
++ }
++ }
++
++ spin_unlock(&part_parser_lock);
++
++ return ret;
++}
++
++static int parse_mtd_partitions_by_type(struct mtd_info *master,
++ enum mtd_parser_type type,
++ const struct mtd_partition **pparts,
++ struct mtd_part_parser_data *data)
++{
++ struct mtd_part_parser *prev = NULL;
++ int ret = 0;
++
++ while (1) {
++ struct mtd_part_parser *parser;
++
++ parser = get_partition_parser_by_type(type, prev);
++ if (!parser)
++ break;
++
++ ret = (*parser->parse_fn)(master, pparts, data);
++
++ if (ret > 0) {
++ mtd_part_parser_put(parser);
++ printk(KERN_NOTICE
++ "%d %s partitions found on MTD device %s\n",
++ ret, parser->name, master->name);
++ break;
++ }
++
++ prev = parser;
++ }
++
++ return ret;
++}
++
++static int
++run_parsers_by_type(struct mtd_info *child, enum mtd_parser_type type)
++{
++ struct mtd_partition *parts;
++ int nr_parts;
++ int i;
++
++ nr_parts = parse_mtd_partitions_by_type(child, type, (const struct mtd_partition **)&parts,
++ NULL);
++ if (nr_parts <= 0)
++ return nr_parts;
++
++ if (WARN_ON(!parts))
++ return 0;
++
++ for (i = 0; i < nr_parts; i++) {
++ /* adjust partition offsets */
++ parts[i].offset += child->part.offset;
++
++ mtd_add_partition(child->parent,
++ parts[i].name,
++ parts[i].offset,
++ parts[i].size);
++ }
++
++ kfree(parts);
++
++ return nr_parts;
++}
++
++#ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
++#define SPLIT_FIRMWARE_NAME CONFIG_MTD_SPLIT_FIRMWARE_NAME
++#else
++#define SPLIT_FIRMWARE_NAME "unused"
++#endif
++
++static void split_firmware(struct mtd_info *master, struct mtd_info *part)
++{
++ run_parsers_by_type(part, MTD_PARSER_TYPE_FIRMWARE);
++}
++
++static void mtd_partition_split(struct mtd_info *master, struct mtd_info *part)
++{
++ static int rootfs_found = 0;
++
++ if (rootfs_found)
++ return;
++
++ if (!strcmp(part->name, "rootfs")) {
++ run_parsers_by_type(part, MTD_PARSER_TYPE_ROOTFS);
++
++ rootfs_found = 1;
++ }
++
++ if (IS_ENABLED(CONFIG_MTD_SPLIT_FIRMWARE) &&
++ !strcmp(part->name, SPLIT_FIRMWARE_NAME) &&
++ !of_find_property(mtd_get_of_node(part), "compatible", NULL))
++ split_firmware(master, part);
++}
++
+ int mtd_add_partition(struct mtd_info *parent, const char *name,
+ long long offset, long long length)
+ {
+@@ -274,6 +416,7 @@ int mtd_add_partition(struct mtd_info *p
+ if (ret)
+ goto err_remove_part;
+
++ mtd_partition_split(parent, child);
+ mtd_add_partition_attrs(child);
+
+ return 0;
+@@ -422,6 +565,7 @@ int add_mtd_partitions(struct mtd_info *
+ goto err_del_partitions;
+ }
+
++ mtd_partition_split(master, child);
+ mtd_add_partition_attrs(child);
+
+ /* Look for subpartitions */
+@@ -438,31 +582,6 @@ err_del_partitions:
+ return ret;
+ }
+
+-static DEFINE_SPINLOCK(part_parser_lock);
+-static LIST_HEAD(part_parsers);
+-
+-static struct mtd_part_parser *mtd_part_parser_get(const char *name)
+-{
+- struct mtd_part_parser *p, *ret = NULL;
+-
+- spin_lock(&part_parser_lock);
+-
+- list_for_each_entry(p, &part_parsers, list)
+- if (!strcmp(p->name, name) && try_module_get(p->owner)) {
+- ret = p;
+- break;
+- }
+-
+- spin_unlock(&part_parser_lock);
+-
+- return ret;
+-}
+-
+-static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
+-{
+- module_put(p->owner);
+-}
+-
+ /*
+ * Many partition parsers just expected the core to kfree() all their data in
+ * one chunk. Do that by default.
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -75,6 +75,12 @@ struct mtd_part_parser_data {
+ * Functions dealing with the various ways of partitioning the space
+ */
+
++enum mtd_parser_type {
++ MTD_PARSER_TYPE_DEVICE = 0,
++ MTD_PARSER_TYPE_ROOTFS,
++ MTD_PARSER_TYPE_FIRMWARE,
++};
++
+ struct mtd_part_parser {
+ struct list_head list;
+ struct module *owner;
+@@ -83,6 +89,7 @@ struct mtd_part_parser {
+ int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
+ struct mtd_part_parser_data *);
+ void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
++ enum mtd_parser_type type;
+ };
+
+ /* Container for passing around a set of parsed partitions */
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -9,6 +9,8 @@ mtd-y := mtdcore.o mtdsuper.o mtdconc
+
+ obj-y += parsers/
+
++obj-$(CONFIG_MTD_SPLIT) += mtdsplit/
++
+ # 'Users' - code which presents functionality to userspace.
+ obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o
+ obj-$(CONFIG_MTD_BLOCK) += mtdblock.o
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -608,6 +608,24 @@ static inline void mtd_align_erase_req(s
+ req->len += mtd->erasesize - mod;
+ }
+
++static inline uint64_t mtd_roundup_to_eb(uint64_t sz, struct mtd_info *mtd)
++{
++ if (mtd_mod_by_eb(sz, mtd) == 0)
++ return sz;
++
++ /* Round up to next erase block */
++ return (mtd_div_by_eb(sz, mtd) + 1) * mtd->erasesize;
++}
++
++static inline uint64_t mtd_rounddown_to_eb(uint64_t sz, struct mtd_info *mtd)
++{
++ if (mtd_mod_by_eb(sz, mtd) == 0)
++ return sz;
++
++ /* Round down to the start of the current erase block */
++ return (mtd_div_by_eb(sz, mtd)) * mtd->erasesize;
++}
++
+ static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
+ {
+ if (mtd->writesize_shift)
+@@ -680,6 +698,13 @@ extern void __put_mtd_device(struct mtd_
+ extern struct mtd_info *get_mtd_device_nm(const char *name);
+ extern void put_mtd_device(struct mtd_info *mtd);
+
++static inline uint64_t mtdpart_get_offset(const struct mtd_info *mtd)
++{
++ if (!mtd_is_partition(mtd))
++ return 0;
++
++ return mtd->part.offset;
++}
+
+ struct mtd_notifier {
+ void (*add)(struct mtd_info *mtd);
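To see how the new parser-type hook is meant to be consumed, here is a bare-bones sketch of a firmware split parser. The name and the fixed 2 MiB kernel/rootfs boundary are placeholders; real parsers detect the boundary from the image format. Offsets are relative to the firmware partition, and run_parsers_by_type() above rebases them with child->part.offset:

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    static int demo_fw_parse(struct mtd_info *master,
                             const struct mtd_partition **pparts,
                             struct mtd_part_parser_data *data)
    {
        struct mtd_partition *parts;

        parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
        if (!parts)
            return -ENOMEM;

        parts[0].name = "kernel";
        parts[0].offset = 0;
        parts[0].size = 0x200000;       /* placeholder boundary */
        parts[1].name = "rootfs";
        parts[1].offset = parts[0].size;
        parts[1].size = master->size - parts[0].size;

        *pparts = parts;
        return 2;                       /* number of partitions found */
    }

    static struct mtd_part_parser demo_fw_parser = {
        .owner = THIS_MODULE,
        .name = "demo-fw",
        .parse_fn = demo_fw_parse,
        .type = MTD_PARSER_TYPE_FIRMWARE,   /* run by split_firmware() */
    };
    module_mtd_part_parser(demo_fw_parser);

    MODULE_LICENSE("GPL");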
diff --git a/target/linux/generic/pending-5.10/419-mtd-redboot-add-of_match_table-with-DT-binding.patch b/target/linux/generic/pending-5.10/419-mtd-redboot-add-of_match_table-with-DT-binding.patch
new file mode 100644
index 0000000000..8358a307cd
--- /dev/null
+++ b/target/linux/generic/pending-5.10/419-mtd-redboot-add-of_match_table-with-DT-binding.patch
@@ -0,0 +1,22 @@
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Subject: [PATCH] mtd: redboot: add of_match_table with DT binding
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This allows parsing RedBoot-compatible partitions for a flash device
+properly described in DT.
+
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+---
+
+--- a/drivers/mtd/parsers/redboot.c
++++ b/drivers/mtd/parsers/redboot.c
+@@ -300,6 +300,7 @@ static int parse_redboot_partitions(stru
+
+ static const struct of_device_id mtd_parser_redboot_of_match_table[] = {
+ { .compatible = "redboot-fis" },
++ { .compatible = "ecoscentric,redboot-fis-partitions" },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, mtd_parser_redboot_of_match_table);
diff --git a/target/linux/generic/pending-5.10/420-mtd-redboot_space.patch b/target/linux/generic/pending-5.10/420-mtd-redboot_space.patch
new file mode 100644
index 0000000000..a2cf838989
--- /dev/null
+++ b/target/linux/generic/pending-5.10/420-mtd-redboot_space.patch
@@ -0,0 +1,41 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: add patch for including unpartitioned space in the rootfs partition for redboot devices (if applicable)
+
+[john@phrozen.org: used by ixp and others]
+
+lede-commit: 394918851f84e4d00fa16eb900e7700e95091f00
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/redboot.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+--- a/drivers/mtd/parsers/redboot.c
++++ b/drivers/mtd/parsers/redboot.c
+@@ -274,14 +274,21 @@ static int parse_redboot_partitions(stru
+ #endif
+ names += strlen(names)+1;
+
+-#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
+- i++;
+- parts[i].offset = parts[i-1].size + parts[i-1].offset;
+- parts[i].size = fl->next->img->flash_base - parts[i].offset;
+- parts[i].name = nullname;
+- }
++ if (!strcmp(parts[i].name, "rootfs")) {
++ parts[i].size = fl->next->img->flash_base;
++ parts[i].size &= ~(master->erasesize - 1);
++ parts[i].size -= parts[i].offset;
++#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
++ nrparts--;
++ } else {
++ i++;
++ parts[i].offset = parts[i-1].size + parts[i-1].offset;
++ parts[i].size = fl->next->img->flash_base - parts[i].offset;
++ parts[i].name = nullname;
+ #endif
++ }
++ }
+ tmp_fl = fl;
+ fl = fl->next;
+ kfree(tmp_fl);
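A worked example of the new size computation: with a rootfs FIS entry at offset 0x140000, the next image at flash_base 0x7e0000 and a 0x10000-byte erase size, the rootfs grows to (0x7e0000 & ~0xffff) - 0x140000 = 0x6a0000 bytes, absorbing the unallocated gap that the old code exposed as a separate nameless partition.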
diff --git a/target/linux/generic/pending-5.10/430-mtd-add-myloader-partition-parser.patch b/target/linux/generic/pending-5.10/430-mtd-add-myloader-partition-parser.patch
new file mode 100644
index 0000000000..5c00612d03
--- /dev/null
+++ b/target/linux/generic/pending-5.10/430-mtd-add-myloader-partition-parser.patch
@@ -0,0 +1,229 @@
+From: Florian Fainelli <f.fainelli@gmail.com>
+Subject: Add myloader partition table parser
+
+[john@phrozen.org: should be upstreamable]
+
+lede-commit: d8bf22859b51faa09d22c056fe221a45d2f7a3b8
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+[adjust for kernel 5.4, add myloader.c to patch]
+Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de>
+
+--- a/drivers/mtd/parsers/Kconfig
++++ b/drivers/mtd/parsers/Kconfig
+@@ -57,6 +57,22 @@ config MTD_CMDLINE_PARTS
+
+ If unsure, say 'N'.
+
++config MTD_MYLOADER_PARTS
++ tristate "MyLoader partition parsing"
++ depends on ADM5120 || ATH25 || ATH79
++ help
++ MyLoader is a bootloader which allows the user to define partitions
++ in flash devices, by putting a table in the second erase block
++ on the device, similar to a partition table. This table gives the
++ offsets and lengths of the user defined partitions.
++
++ If you need code which can detect and parse these tables, and
++ register MTD 'partitions' corresponding to each image detected,
++ enable this option.
++
++ You will still need the parsing functions to be called by the driver
++ for your particular device. It won't happen automatically.
++
+ config MTD_OF_PARTS
+ tristate "OpenFirmware (device tree) partitioning parser"
+ default y
+--- a/drivers/mtd/parsers/Makefile
++++ b/drivers/mtd/parsers/Makefile
+@@ -3,6 +3,7 @@ obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.
+ obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
+ obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
+ obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
++obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o
+ obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
+ obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o
+ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+--- /dev/null
++++ b/drivers/mtd/parsers/myloader.c
+@@ -0,0 +1,181 @@
++/*
++ * Parse MyLoader-style flash partition tables and produce a Linux partition
++ * array to match.
++ *
++ * Copyright (C) 2007-2009 Gabor Juhos <juhosg@openwrt.org>
++ *
++ * This file was based on drivers/mtd/redboot.c
++ * Author: Red Hat, Inc. - David Woodhouse <dwmw2@cambridge.redhat.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/vmalloc.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
++#include <linux/byteorder/generic.h>
++#include <linux/myloader.h>
++
++#define BLOCK_LEN_MIN 0x10000
++#define PART_NAME_LEN 32
++
++struct part_data {
++ struct mylo_partition_table tab;
++ char names[MYLO_MAX_PARTITIONS][PART_NAME_LEN];
++};
++
++static int myloader_parse_partitions(struct mtd_info *master,
++ const struct mtd_partition **pparts,
++ struct mtd_part_parser_data *data)
++{
++ struct part_data *buf;
++ struct mylo_partition_table *tab;
++ struct mylo_partition *part;
++ struct mtd_partition *mtd_parts;
++ struct mtd_partition *mtd_part;
++ int num_parts;
++ int ret, i;
++ size_t retlen;
++ char *names;
++ unsigned long offset;
++ unsigned long blocklen;
++
++ buf = vmalloc(sizeof(*buf));
++ if (!buf) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ tab = &buf->tab;
++
++ blocklen = master->erasesize;
++ if (blocklen < BLOCK_LEN_MIN)
++ blocklen = BLOCK_LEN_MIN;
++
++ offset = blocklen;
++
++ /* Find the partition table */
++ for (i = 0; i < 4; i++, offset += blocklen) {
++ printk(KERN_DEBUG "%s: searching for MyLoader partition table"
++ " at offset 0x%lx\n", master->name, offset);
++
++ ret = mtd_read(master, offset, sizeof(*buf), &retlen,
++ (void *)buf);
++ if (ret)
++ goto out_free_buf;
++
++ if (retlen != sizeof(*buf)) {
++ ret = -EIO;
++ goto out_free_buf;
++ }
++
++ /* Check for Partition Table magic number */
++ if (tab->magic == le32_to_cpu(MYLO_MAGIC_PARTITIONS))
++ break;
++
++ }
++
++ if (tab->magic != le32_to_cpu(MYLO_MAGIC_PARTITIONS)) {
++ printk(KERN_DEBUG "%s: no MyLoader partition table found\n",
++ master->name);
++ ret = 0;
++ goto out_free_buf;
++ }
++
++ /* The MyLoader and the Partition Table are always present */
++ num_parts = 2;
++
++ /* Detect number of used partitions */
++ for (i = 0; i < MYLO_MAX_PARTITIONS; i++) {
++ part = &tab->partitions[i];
++
++ if (le16_to_cpu(part->type) == PARTITION_TYPE_FREE)
++ continue;
++
++ num_parts++;
++ }
++
++ mtd_parts = kzalloc((num_parts * sizeof(*mtd_part) +
++ num_parts * PART_NAME_LEN), GFP_KERNEL);
++
++ if (!mtd_parts) {
++ ret = -ENOMEM;
++ goto out_free_buf;
++ }
++
++ mtd_part = mtd_parts;
++ names = (char *)&mtd_parts[num_parts];
++
++ strncpy(names, "myloader", PART_NAME_LEN);
++ mtd_part->name = names;
++ mtd_part->offset = 0;
++ mtd_part->size = offset;
++ mtd_part->mask_flags = MTD_WRITEABLE;
++ mtd_part++;
++ names += PART_NAME_LEN;
++
++ strncpy(names, "partition_table", PART_NAME_LEN);
++ mtd_part->name = names;
++ mtd_part->offset = offset;
++ mtd_part->size = blocklen;
++ mtd_part->mask_flags = MTD_WRITEABLE;
++ mtd_part++;
++ names += PART_NAME_LEN;
++
++ for (i = 0; i < MYLO_MAX_PARTITIONS; i++) {
++ part = &tab->partitions[i];
++
++ if (le16_to_cpu(part->type) == PARTITION_TYPE_FREE)
++ continue;
++
++ if ((buf->names[i][0]) && (buf->names[i][0] != '\xff'))
++ strncpy(names, buf->names[i], PART_NAME_LEN);
++ else
++ snprintf(names, PART_NAME_LEN, "partition%d", i);
++
++ mtd_part->offset = le32_to_cpu(part->addr);
++ mtd_part->size = le32_to_cpu(part->size);
++ mtd_part->name = names;
++ mtd_part++;
++ names += PART_NAME_LEN;
++ }
++
++ *pparts = mtd_parts;
++ ret = num_parts;
++
++ out_free_buf:
++ vfree(buf);
++ out:
++ return ret;
++}
++
++static struct mtd_part_parser myloader_mtd_parser = {
++ .owner = THIS_MODULE,
++ .parse_fn = myloader_parse_partitions,
++ .name = "MyLoader",
++};
++
++static int __init myloader_mtd_parser_init(void)
++{
++ register_mtd_parser(&myloader_mtd_parser);
++
++ return 0;
++}
++
++static void __exit myloader_mtd_parser_exit(void)
++{
++ deregister_mtd_parser(&myloader_mtd_parser);
++}
++
++module_init(myloader_mtd_parser_init);
++module_exit(myloader_mtd_parser_exit);
++
++MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
++MODULE_DESCRIPTION("Parsing code for MyLoader partition tables");
++MODULE_LICENSE("GPL v2");
diff --git a/target/linux/generic/pending-5.10/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch b/target/linux/generic/pending-5.10/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch
new file mode 100644
index 0000000000..2ea59cd872
--- /dev/null
+++ b/target/linux/generic/pending-5.10/431-mtd-bcm47xxpart-check-for-bad-blocks-when-calculatin.patch
@@ -0,0 +1,68 @@
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com>
+Subject: [PATCH] mtd: bcm47xxpart: check for bad blocks when calculating offsets
+
+Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
+---
+
+--- a/drivers/mtd/parsers/parser_trx.c
++++ b/drivers/mtd/parsers/parser_trx.c
+@@ -25,6 +25,33 @@ struct trx_header {
+ uint32_t offset[3];
+ } __packed;
+
++/*
++ * Calculate real end offset (address) for a given amount of data. It checks
++ * all blocks skipping bad ones.
++ */
++static size_t parser_trx_real_offset(struct mtd_info *mtd, size_t bytes)
++{
++ size_t real_offset = 0;
++
++ if (mtd_block_isbad(mtd, real_offset))
++ pr_warn("Base offset shouldn't be at bad block");
++
++ while (bytes >= mtd->erasesize) {
++ bytes -= mtd->erasesize;
++ real_offset += mtd->erasesize;
++ while (mtd_block_isbad(mtd, real_offset)) {
++ real_offset += mtd->erasesize;
++
++ if (real_offset >= mtd->size)
++ return real_offset - mtd->erasesize;
++ }
++ }
++
++ real_offset += bytes;
++
++ return real_offset;
++}
++
+ static const char *parser_trx_data_part_name(struct mtd_info *master,
+ size_t offset)
+ {
+@@ -79,21 +106,21 @@ static int parser_trx_parse(struct mtd_i
+ if (trx.offset[2]) {
+ part = &parts[curr_part++];
+ part->name = "loader";
+- part->offset = trx.offset[i];
++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]);
+ i++;
+ }
+
+ if (trx.offset[i]) {
+ part = &parts[curr_part++];
+ part->name = "linux";
+- part->offset = trx.offset[i];
++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]);
+ i++;
+ }
+
+ if (trx.offset[i]) {
+ part = &parts[curr_part++];
+- part->name = parser_trx_data_part_name(mtd, trx.offset[i]);
+- part->offset = trx.offset[i];
++ part->offset = parser_trx_real_offset(mtd, trx.offset[i]);
++ part->name = parser_trx_data_part_name(mtd, part->offset);
+ i++;
+ }
+
diff --git a/target/linux/generic/pending-5.10/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch b/target/linux/generic/pending-5.10/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch
new file mode 100644
index 0000000000..852654d924
--- /dev/null
+++ b/target/linux/generic/pending-5.10/432-mtd-bcm47xxpart-detect-T_Meter-partition.patch
@@ -0,0 +1,37 @@
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <zajec5@gmail.com>
+Subject: mtd: bcm47xxpart: detect T_Meter partition
+
+It can be found on many Netgear devices. It consists of many 0x30-byte
+blocks, each starting with the bytes 4D 54 ("MT").
+
+Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
+---
+ drivers/mtd/bcm47xxpart.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mtd/parsers/bcm47xxpart.c
++++ b/drivers/mtd/parsers/bcm47xxpart.c
+@@ -35,6 +35,7 @@
+ #define NVRAM_HEADER 0x48534C46 /* FLSH */
+ #define POT_MAGIC1 0x54544f50 /* POTT */
+ #define POT_MAGIC2 0x504f /* OP */
++#define T_METER_MAGIC 0x4D540000 /* MT */
+ #define ML_MAGIC1 0x39685a42
+ #define ML_MAGIC2 0x26594131
+ #define TRX_MAGIC 0x30524448
+@@ -178,6 +179,15 @@ static int bcm47xxpart_parse(struct mtd_
+ MTD_WRITEABLE);
+ continue;
+ }
++
++ /* T_Meter */
++ if ((le32_to_cpu(buf[0x000 / 4]) & 0xFFFF0000) == T_METER_MAGIC &&
++ (le32_to_cpu(buf[0x030 / 4]) & 0xFFFF0000) == T_METER_MAGIC &&
++ (le32_to_cpu(buf[0x060 / 4]) & 0xFFFF0000) == T_METER_MAGIC) {
++ bcm47xxpart_add_part(&parts[curr_part++], "T_Meter", offset,
++ MTD_WRITEABLE);
++ continue;
++ }
+
+ /* TRX */
+ if (buf[0x000 / 4] == TRX_MAGIC) {
diff --git a/target/linux/generic/pending-5.10/435-mtd-add-routerbootpart-parser-config.patch b/target/linux/generic/pending-5.10/435-mtd-add-routerbootpart-parser-config.patch
new file mode 100644
index 0000000000..1523e757c7
--- /dev/null
+++ b/target/linux/generic/pending-5.10/435-mtd-add-routerbootpart-parser-config.patch
@@ -0,0 +1,38 @@
+From 4437e01fb6bca63fccdba5d6c44888b0935885c2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thibaut=20VAR=C3=88NE?= <hacks@slashdirt.org>
+Date: Tue, 24 Mar 2020 11:45:07 +0100
+Subject: [PATCH] generic: routerboot partition build bits (5.4)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch adds routerbootpart kernel build bits
+
+Signed-off-by: Thibaut VARÈNE <hacks@slashdirt.org>
+---
+ drivers/mtd/parsers/Kconfig | 9 +++++++++
+ drivers/mtd/parsers/Makefile | 1 +
+ 2 files changed, 10 insertions(+)
+
+--- a/drivers/mtd/parsers/Kconfig
++++ b/drivers/mtd/parsers/Kconfig
+@@ -176,3 +176,12 @@ config MTD_REDBOOT_PARTS_READONLY
+ 'FIS directory' images, enable this option.
+
+ endif # MTD_REDBOOT_PARTS
++
++config MTD_ROUTERBOOT_PARTS
++ tristate "RouterBoot flash partition parser"
++ depends on MTD && OF
++ help
++ MikroTik RouterBoot is implemented as a multi-segment system on the
++ flash; some segments are fixed and some are located at variable
++ offsets. This parser handles both cases via a properly formatted
++ DTS.
+--- a/drivers/mtd/parsers/Makefile
++++ b/drivers/mtd/parsers/Makefile
+@@ -10,3 +10,4 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+ obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
+ obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
+ obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
++obj-$(CONFIG_MTD_ROUTERBOOT_PARTS) += routerbootpart.o
diff --git a/target/linux/generic/pending-5.10/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch b/target/linux/generic/pending-5.10/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch
new file mode 100644
index 0000000000..659a6386ae
--- /dev/null
+++ b/target/linux/generic/pending-5.10/460-mtd-cfi_cmdset_0002-no-erase_suspend.patch
@@ -0,0 +1,25 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: disable cfi cmdset 0002 erase suspend
+
+On some platforms, erase suspend leads to data corruption and lockups when write
+ops collide with erase ops. This has been observed on the Buffalo WZR-HP-G300NH.
+Rather than play whack-a-mole with a hard-to-reproduce issue on a variety of
+devices, simply disable erase suspend, as it will usually not produce any useful
+gain on the small filesystems used on embedded hardware.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -909,7 +909,7 @@ static int get_chip(struct map_info *map
+ return 0;
+
+ case FL_ERASING:
+- if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
++ if (1 /* no suspend */ || !cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
+ goto sleep;
diff --git a/target/linux/generic/pending-5.10/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch b/target/linux/generic/pending-5.10/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch
new file mode 100644
index 0000000000..79276b1b87
--- /dev/null
+++ b/target/linux/generic/pending-5.10/461-mtd-cfi_cmdset_0002-add-buffer-write-cmd-timeout.patch
@@ -0,0 +1,17 @@
+From: George Kashperko <george@znau.edu.ua>
+Subject: Issue map read after Write Buffer Load command to ensure chip is ready to receive data.
+
+Signed-off-by: George Kashperko <george@znau.edu.ua>
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 1 +
+ 1 file changed, 1 insertion(+)
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -2053,6 +2053,7 @@ static int __xipram do_write_buffer(stru
+
+ /* Write Buffer Load */
+ map_write(map, CMD(0x25), cmd_adr);
++ (void) map_read(map, cmd_adr);
+
+ chip->state = FL_WRITING_TO_BUFFER;
+
diff --git a/target/linux/generic/pending-5.10/465-m25p80-mx-disable-software-protection.patch b/target/linux/generic/pending-5.10/465-m25p80-mx-disable-software-protection.patch
new file mode 100644
index 0000000000..e9a9cad869
--- /dev/null
+++ b/target/linux/generic/pending-5.10/465-m25p80-mx-disable-software-protection.patch
@@ -0,0 +1,18 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: Disable software protection bits for Macronix flashes.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/spi-nor/macronix.c
++++ b/drivers/mtd/spi-nor/macronix.c
+@@ -96,6 +96,7 @@ static void macronix_default_init(struct
+ {
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+ nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode;
++ nor->flags |= SNOR_F_HAS_LOCK;
+ }
+
+ static const struct spi_nor_fixups macronix_fixups = {
diff --git a/target/linux/generic/pending-5.10/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch b/target/linux/generic/pending-5.10/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch
new file mode 100644
index 0000000000..bbdab18102
--- /dev/null
+++ b/target/linux/generic/pending-5.10/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch
@@ -0,0 +1,71 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 4 Nov 2017 07:40:23 +0100
+Subject: [PATCH] mtd: spi-nor: support limiting 4K sectors support based on
+ flash size
+
+Some devices need 4K sectors to be able to deal with small flash chips.
+For instance, w25x05 is 64 KiB in size, and without 4K sectors, the
+entire chip is just one erase block.
+On bigger flash chip sizes, using 4K sectors can significantly slow down
+many operations, including using a writable filesystem. There are several
+platforms where it makes sense to use a single kernel on both kinds of
+devices.
+
+To support this properly, allow configuring an upper flash chip size
+limit for 4K sectors support.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/mtd/spi-nor/Kconfig
++++ b/drivers/mtd/spi-nor/Kconfig
+@@ -24,6 +24,17 @@ config MTD_SPI_NOR_USE_4K_SECTORS
+ Please note that some tools/drivers/filesystems may not work with
+ 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
+
++config MTD_SPI_NOR_USE_4K_SECTORS_LIMIT
++ int "Maximum flash chip size to use 4K sectors on (in KiB)"
++ depends on MTD_SPI_NOR_USE_4K_SECTORS
++ default "4096"
++ help
++ There are many flash chips that support 4K sectors, but are so large
++ that using them significantly slows down writing large amounts of
++ data or using a writable filesystem.
++ Any flash chip larger than the size specified in this option will
++ not use 4K sectors.
++
+ source "drivers/mtd/spi-nor/controllers/Kconfig"
+
+ endif # MTD_SPI_NOR
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2784,6 +2784,21 @@ static void spi_nor_info_init_params(str
+ */
+ erase_mask = 0;
+ i = 0;
++#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
++ if ((info->flags & SECT_4K_PMC) && (params->size <=
++ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT * 1024)) {
++ erase_mask |= BIT(i);
++ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
++ SPINOR_OP_BE_4K_PMC);
++ i++;
++ } else if ((info->flags & SECT_4K) && (params->size <=
++ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT * 1024)) {
++ erase_mask |= BIT(i);
++ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
++ SPINOR_OP_BE_4K);
++ i++;
++ }
++#else
+ if (info->flags & SECT_4K_PMC) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+@@ -2795,6 +2810,7 @@ static void spi_nor_info_init_params(str
+ SPINOR_OP_BE_4K);
+ i++;
+ }
++#endif
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
+ SPINOR_OP_SE);
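As a hypothetical configuration with the default limit, a 16 MiB chip would use only its 64 KiB uniform sectors while a tiny 64 KiB part such as the w25x05 keeps its 4K sectors:

    CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
    CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT=4096

The limit is in KiB, so 4096 means chips up to 4 MiB still advertise the 4K erase type.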
diff --git a/target/linux/generic/pending-5.10/476-mtd-spi-nor-add-eon-en25q128.patch b/target/linux/generic/pending-5.10/476-mtd-spi-nor-add-eon-en25q128.patch
new file mode 100644
index 0000000000..325fca62f3
--- /dev/null
+++ b/target/linux/generic/pending-5.10/476-mtd-spi-nor-add-eon-en25q128.patch
@@ -0,0 +1,18 @@
+From: Piotr Dymacz <pepe2k@gmail.com>
+Subject: kernel/mtd: add support for EON EN25Q128
+
+Signed-off-by: Piotr Dymacz <pepe2k@gmail.com>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/spi-nor/eon.c
++++ b/drivers/mtd/spi-nor/eon.c
+@@ -15,6 +15,7 @@ static const struct flash_info eon_parts
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
++ { "en25q128", INFO(0x1c3018, 0, 64 * 1024, 256, SECT_4K) },
+ { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
diff --git a/target/linux/generic/pending-5.10/479-mtd-spi-nor-add-xtx-xt25f128b.patch b/target/linux/generic/pending-5.10/479-mtd-spi-nor-add-xtx-xt25f128b.patch
new file mode 100644
index 0000000000..3e7cd03679
--- /dev/null
+++ b/target/linux/generic/pending-5.10/479-mtd-spi-nor-add-xtx-xt25f128b.patch
@@ -0,0 +1,79 @@
+From patchwork Thu Feb 6 17:19:41 2020
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Daniel Golle <daniel@makrotopia.org>
+X-Patchwork-Id: 1234465
+Date: Thu, 6 Feb 2020 19:19:41 +0200
+From: Daniel Golle <daniel@makrotopia.org>
+To: linux-mtd@lists.infradead.org
+Subject: [PATCH v2] mtd: spi-nor: Add support for xt25f128b chip
+Message-ID: <20200206171941.GA2398@makrotopia.org>
+MIME-Version: 1.0
+Content-Disposition: inline
+List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mtd>,
+ <mailto:linux-mtd-request@lists.infradead.org?subject=subscribe>
+Cc: Eitan Cohen <eitan@neot-semadar.com>, Piotr Dymacz <pepe2k@gmail.com>,
+ Tudor Ambarus <tudor.ambarus@microchip.com>
+Sender: "linux-mtd" <linux-mtd-bounces@lists.infradead.org>
+Errors-To: linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org
+
+Add XT25F128B made by XTX Technology (Shenzhen) Limited.
+This chip supports dual and quad read and uniform 4K-byte erase.
+Verified on Teltonika RUT955 which comes with XT25F128B in recent
+versions of the device.
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/mtd/spi-nor/Makefile
++++ b/drivers/mtd/spi-nor/Makefile
+@@ -17,6 +17,7 @@ spi-nor-objs += sst.o
+ spi-nor-objs += winbond.o
+ spi-nor-objs += xilinx.o
+ spi-nor-objs += xmc.o
++spi-nor-objs += xtx.o
+ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
+
+ obj-$(CONFIG_MTD_SPI_NOR) += controllers/
+--- /dev/null
++++ b/drivers/mtd/spi-nor/xtx.c
+@@ -0,0 +1,15 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/mtd/spi-nor.h>
++
++#include "core.h"
++
++static const struct flash_info xtx_parts[] = {
++ /* XTX Technology (Shenzhen) Limited */
++ { "xt25f128b", INFO(0x0B4018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++};
++
++const struct spi_nor_manufacturer spi_nor_xtx = {
++ .name = "xtx",
++ .parts = xtx_parts,
++ .nparts = ARRAY_SIZE(xtx_parts),
++};
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2024,6 +2024,7 @@ static const struct spi_nor_manufacturer
+ &spi_nor_winbond,
+ &spi_nor_xilinx,
+ &spi_nor_xmc,
++ &spi_nor_xtx,
+ };
+
+ static const struct flash_info *
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -398,6 +398,7 @@ extern const struct spi_nor_manufacturer
+ extern const struct spi_nor_manufacturer spi_nor_winbond;
+ extern const struct spi_nor_manufacturer spi_nor_xilinx;
+ extern const struct spi_nor_manufacturer spi_nor_xmc;
++extern const struct spi_nor_manufacturer spi_nor_xtx;
+
+ int spi_nor_write_enable(struct spi_nor *nor);
+ int spi_nor_write_disable(struct spi_nor *nor);
diff --git a/target/linux/generic/pending-5.10/480-mtd-set-rootfs-to-be-root-dev.patch b/target/linux/generic/pending-5.10/480-mtd-set-rootfs-to-be-root-dev.patch
new file mode 100644
index 0000000000..11cdc179d2
--- /dev/null
+++ b/target/linux/generic/pending-5.10/480-mtd-set-rootfs-to-be-root-dev.patch
@@ -0,0 +1,38 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: kernel/3.1[02]: move MTD root device setup code to mtdcore
+
+The current code only allows the root device to be set
+automatically on MTD partitions. Move the code to the MTD
+core so it can be used with all MTD devices.
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/mtdcore.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -27,6 +27,7 @@
+ #include <linux/reboot.h>
+ #include <linux/leds.h>
+ #include <linux/debugfs.h>
++#include <linux/root_dev.h>
+ #include <linux/nvmem-provider.h>
+
+ #include <linux/mtd/mtd.h>
+@@ -692,6 +693,15 @@ int add_mtd_device(struct mtd_info *mtd)
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
++
++ if (!strcmp(mtd->name, "rootfs") &&
++ IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
++ ROOT_DEV == 0) {
++ pr_notice("mtd: device %d (%s) set to be root filesystem\n",
++ mtd->index, mtd->name);
++ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
++ }
++
+ return 0;
+
+ fail_nvmem_add:
diff --git a/target/linux/generic/pending-5.10/481-mtd-spi-nor-rework-broken-flash-reset-support.patch b/target/linux/generic/pending-5.10/481-mtd-spi-nor-rework-broken-flash-reset-support.patch
new file mode 100644
index 0000000000..e8e737ffca
--- /dev/null
+++ b/target/linux/generic/pending-5.10/481-mtd-spi-nor-rework-broken-flash-reset-support.patch
@@ -0,0 +1,180 @@
+From ea92cbb50a78404e29de2cc3999a240615ffb1c8 Mon Sep 17 00:00:00 2001
+From: Chuanhong Guo <gch981213@gmail.com>
+Date: Mon, 6 Apr 2020 17:58:48 +0800
+Subject: [PATCH] mtd: spi-nor: rework broken-flash-reset support
+
+Instead of resetting the flash to 3-byte addressing in the remove
+hook, this implementation only enters 4-byte mode when needed, which
+prevents more cases of devices getting stuck after unexpected reboots.
+With this implementation, things can only break when a kernel panic
+happens during a flash operation on areas above 16M.
+*OpenWrt only*: silence the broken-flash-reset warning. We are not
+dealing with vendors, and it is unpleasant for users to see that
+unnecessary and long WARN_ON print.
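+
+As a usage sketch (illustration only), every flash operation is bracketed
+so that 4-byte mode is active only while an address above 16M is actually
+being accessed:
+
+    ret = spi_nor_check_set_addr_width(nor, to + len); /* may enter 4B mode */
+    if (ret < 0)
+        return ret;
+    /* ... perform the read/write/erase/lock operation ... */
+    spi_nor_check_set_addr_width(nor, 0); /* back to 3B for broken-reset chips */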
+
+Signed-off-by: Chuanhong Guo <gch981213@gmail.com>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 52 +++++++++++++++++++++++++++++++++--
+ 1 file changed, 49 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -1445,6 +1445,23 @@ destroy_erase_cmd_list:
+ return ret;
+ }
+
++int spi_nor_check_set_addr_width(struct spi_nor *nor, loff_t addr)
++{
++ u8 addr_width;
++
++ if ((nor->flags & (SNOR_F_4B_OPCODES | SNOR_F_BROKEN_RESET)) !=
++ SNOR_F_BROKEN_RESET)
++ return 0;
++
++ addr_width = addr & 0xff000000 ? 4 : 3;
++ if (nor->addr_width == addr_width)
++ return 0;
++
++ nor->addr_width = addr_width;
++
++ return nor->params->set_4byte_addr_mode(nor, addr_width == 4);
++}
++
+ /*
+ * Erase an address range on the nor chip. The address range may extend
+ * one or more erase sectors. Return an error is there is a problem erasing.
+@@ -1472,6 +1489,10 @@ static int spi_nor_erase(struct mtd_info
+ if (ret)
+ return ret;
+
++ ret = spi_nor_check_set_addr_width(nor, instr->addr + instr->len);
++ if (ret < 0)
++ return ret;
++
+ /* whole-chip erase? */
+ if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+ unsigned long timeout;
+@@ -1531,6 +1552,7 @@ static int spi_nor_erase(struct mtd_info
+ ret = spi_nor_write_disable(nor);
+
+ erase_err:
++ spi_nor_check_set_addr_width(nor, 0);
+ spi_nor_unlock_and_unprep(nor);
+
+ return ret;
+@@ -1870,7 +1892,9 @@ static int spi_nor_lock(struct mtd_info
+ if (ret)
+ return ret;
+
++ spi_nor_check_set_addr_width(nor, ofs + len);
+ ret = nor->params->locking_ops->lock(nor, ofs, len);
++ spi_nor_check_set_addr_width(nor, 0);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+@@ -1885,7 +1909,9 @@ static int spi_nor_unlock(struct mtd_inf
+ if (ret)
+ return ret;
+
++ spi_nor_check_set_addr_width(nor, ofs + len);
+ ret = nor->params->locking_ops->unlock(nor, ofs, len);
++ spi_nor_check_set_addr_width(nor, 0);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+@@ -1900,7 +1926,9 @@ static int spi_nor_is_locked(struct mtd_
+ if (ret)
+ return ret;
+
++ spi_nor_check_set_addr_width(nor, ofs + len);
+ ret = nor->params->locking_ops->is_locked(nor, ofs, len);
++ spi_nor_check_set_addr_width(nor, 0);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+@@ -2093,6 +2121,10 @@ static int spi_nor_read(struct mtd_info
+ if (ret)
+ return ret;
+
++ ret = spi_nor_check_set_addr_width(nor, from + len);
++ if (ret < 0)
++ return ret;
++
+ while (len) {
+ loff_t addr = from;
+
+@@ -2116,6 +2148,7 @@ static int spi_nor_read(struct mtd_info
+ ret = 0;
+
+ read_err:
++ spi_nor_check_set_addr_width(nor, 0);
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+ }
+@@ -2138,6 +2171,10 @@ static int spi_nor_write(struct mtd_info
+ if (ret)
+ return ret;
+
++ ret = spi_nor_check_set_addr_width(nor, to + len);
++ if (ret < 0)
++ return ret;
++
+ for (i = 0; i < len; ) {
+ ssize_t written;
+ loff_t addr = to + i;
+@@ -2180,6 +2217,7 @@ static int spi_nor_write(struct mtd_info
+ }
+
+ write_err:
++ spi_nor_check_set_addr_width(nor, 0);
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+ }
+@@ -2975,9 +3013,13 @@ static int spi_nor_init(struct spi_nor *
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+- WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+- "enabling reset hack; may not recover from unexpected reboots\n");
+- nor->params->set_4byte_addr_mode(nor, true);
++ if (nor->flags & SNOR_F_BROKEN_RESET) {
++ dev_warn(nor->dev,
++ "enabling reset hack; may not recover from unexpected reboots\n");
++ nor->addr_width = 3;
++ } else {
++ nor->params->set_4byte_addr_mode(nor, true);
++ }
+ }
+
+ return 0;
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -400,6 +400,7 @@ extern const struct spi_nor_manufacturer
+ extern const struct spi_nor_manufacturer spi_nor_xmc;
+ extern const struct spi_nor_manufacturer spi_nor_xtx;
+
++int spi_nor_check_set_addr_width(struct spi_nor *nor, loff_t addr);
+ int spi_nor_write_enable(struct spi_nor *nor);
+ int spi_nor_write_disable(struct spi_nor *nor);
+ int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable);
+--- a/drivers/mtd/spi-nor/sst.c
++++ b/drivers/mtd/spi-nor/sst.c
+@@ -55,6 +55,10 @@ static int sst_write(struct mtd_info *mt
+ if (ret)
+ return ret;
+
++ ret = spi_nor_check_set_addr_width(nor, to + len);
++ if (ret < 0)
++ return ret;
++
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+@@ -124,6 +128,7 @@ static int sst_write(struct mtd_info *mt
+ }
+ out:
+ *retlen += actual;
++ spi_nor_check_set_addr_width(nor, 0);
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+ }
diff --git a/target/linux/generic/pending-5.10/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch b/target/linux/generic/pending-5.10/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch
new file mode 100644
index 0000000000..c32cde559d
--- /dev/null
+++ b/target/linux/generic/pending-5.10/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch
@@ -0,0 +1,22 @@
+From d68b4aa22e8c625685bfad642dd7337948dc0ad1 Mon Sep 17 00:00:00 2001
+From: Koen Vandeputte <koen.vandeputte@ncentric.com>
+Date: Mon, 6 Jan 2020 13:07:56 +0100
+Subject: [PATCH] mtd: spi-nor: add support for Gigadevice GD25D05
+
+Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/mtd/spi-nor/gigadevice.c
++++ b/drivers/mtd/spi-nor/gigadevice.c
+@@ -24,6 +24,9 @@ static struct spi_nor_fixups gd25q256_fi
+ };
+
+ static const struct flash_info gigadevice_parts[] = {
++ { "gd25q05", INFO(0xc84010, 0, 64 * 1024, 1,
++ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
++ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
diff --git a/target/linux/generic/pending-5.10/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch b/target/linux/generic/pending-5.10/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch
new file mode 100644
index 0000000000..7c9766fa7b
--- /dev/null
+++ b/target/linux/generic/pending-5.10/490-ubi-auto-attach-mtd-device-named-ubi-or-data-on-boot.patch
@@ -0,0 +1,97 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: ubi: auto-attach mtd device named "ubi" or "data" on boot
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/ubi/build.c | 36 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 36 insertions(+)
+
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -1192,6 +1192,73 @@ static struct mtd_info * __init open_mtd
+ return mtd;
+ }
+
++/*
++ * This function tries attaching mtd partitions named either "ubi" or "data"
++ * during boot.
++ */
++static void __init ubi_auto_attach(void)
++{
++ int err;
++ struct mtd_info *mtd;
++ loff_t offset = 0;
++ size_t len;
++ char magic[4];
++
++ /* try attaching mtd device named "ubi" or "data" */
++ mtd = open_mtd_device("ubi");
++ if (IS_ERR(mtd))
++ mtd = open_mtd_device("data");
++
++ if (IS_ERR(mtd))
++ return;
++
++ /* get the first not bad block */
++ if (mtd_can_have_bb(mtd))
++ while (mtd_block_isbad(mtd, offset)) {
++ offset += mtd->erasesize;
++
++ if (offset > mtd->size) {
++ pr_err("UBI error: Failed to find a non-bad "
++ "block on mtd%d\n", mtd->index);
++ goto cleanup;
++ }
++ }
++
++ /* check if the read from flash was successful */
++ err = mtd_read(mtd, offset, 4, &len, (void *) magic);
++ if ((err && !mtd_is_bitflip(err)) || len != 4) {
++ pr_err("UBI error: unable to read from mtd%d\n", mtd->index);
++ goto cleanup;
++ }
++
++ /* check for a valid ubi magic */
++ if (strncmp(magic, "UBI#", 4)) {
++ pr_err("UBI error: no valid UBI magic found inside mtd%d\n", mtd->index);
++ goto cleanup;
++ }
++
++	/* don't auto-add media types where UBI doesn't make sense */
++ if (mtd->type != MTD_NANDFLASH &&
++ mtd->type != MTD_NORFLASH &&
++ mtd->type != MTD_DATAFLASH &&
++ mtd->type != MTD_MLCNANDFLASH)
++ goto cleanup;
++
++ mutex_lock(&ubi_devices_mutex);
++ pr_notice("UBI: auto-attach mtd%d\n", mtd->index);
++ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0);
++ mutex_unlock(&ubi_devices_mutex);
++ if (err < 0) {
++ pr_err("UBI error: cannot attach mtd%d\n", mtd->index);
++ goto cleanup;
++ }
++
++ return;
++
++cleanup:
++ put_mtd_device(mtd);
++}
++
+ static int __init ubi_init(void)
+ {
+ int err, i, k;
+@@ -1275,6 +1342,12 @@ static int __init ubi_init(void)
+ }
+ }
+
++	/* auto-attach mtd devices only when UBI is built into the kernel and no
++	 * ubi.mtd parameter was given */
++ if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
++ !ubi_is_module() && !mtd_devs)
++ ubi_auto_attach();
++
+ err = ubiblock_init();
+ if (err) {
+ pr_err("UBI error: block: cannot initialize, error %d\n", err);
diff --git a/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch b/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
new file mode 100644
index 0000000000..61fcbac92e
--- /dev/null
+++ b/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
@@ -0,0 +1,66 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: ubi: auto-create ubiblock device for rootfs
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/ubi/block.c | 42 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -652,6 +652,44 @@ static void __init ubiblock_create_from_
+ }
+ }
+
++#define UBIFS_NODE_MAGIC 0x06101831
++static inline int ubi_vol_is_ubifs(struct ubi_volume_desc *desc)
++{
++ int ret;
++ uint32_t magic_of, magic;
++ ret = ubi_read(desc, 0, (char *)&magic_of, 0, 4);
++ if (ret)
++ return 0;
++ magic = le32_to_cpu(magic_of);
++ return magic == UBIFS_NODE_MAGIC;
++}
++
++static void __init ubiblock_create_auto_rootfs(void)
++{
++ int ubi_num, ret, is_ubifs;
++ struct ubi_volume_desc *desc;
++ struct ubi_volume_info vi;
++
++ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) {
++ desc = ubi_open_volume_nm(ubi_num, "rootfs", UBI_READONLY);
++ if (IS_ERR(desc))
++ continue;
++
++ ubi_get_volume_info(desc, &vi);
++ is_ubifs = ubi_vol_is_ubifs(desc);
++ ubi_close_volume(desc);
++ if (is_ubifs)
++ break;
++
++ ret = ubiblock_create(&vi);
++ if (ret)
++ pr_err("UBI error: block: can't add '%s' volume, err=%d\n",
++ vi.name, ret);
++ /* always break if we get here */
++ break;
++ }
++}
++
+ static void ubiblock_remove_all(void)
+ {
+ struct ubiblock *next;
+@@ -684,6 +722,10 @@ int __init ubiblock_init(void)
+ */
+ ubiblock_create_from_param();
+
++ /* auto-attach "rootfs" volume if existing and non-ubifs */
++ if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV))
++ ubiblock_create_auto_rootfs();
++
+ /*
+ * Block devices are only created upon user requests, so we ignore
+ * existing volumes.
diff --git a/target/linux/generic/pending-5.10/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch b/target/linux/generic/pending-5.10/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch
new file mode 100644
index 0000000000..af65cbfa59
--- /dev/null
+++ b/target/linux/generic/pending-5.10/492-try-auto-mounting-ubi0-rootfs-in-init-do_mounts.c.patch
@@ -0,0 +1,51 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: try auto-mounting ubi0:rootfs in init/do_mounts.c
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ init/do_mounts.c | 26 +++++++++++++++++++++++++-
+ 1 file changed, 25 insertions(+), 1 deletion(-)
+
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -474,7 +474,28 @@ retry:
+ out:
+ put_page(page);
+ }
+-
++
++static int __init mount_ubi_rootfs(void)
++{
++ int flags = MS_SILENT;
++ int err, tried = 0;
++
++ while (tried < 2) {
++ err = do_mount_root("ubi0:rootfs", "ubifs", flags, \
++ root_mount_data);
++ switch (err) {
++ case -EACCES:
++ flags |= MS_RDONLY;
++ tried++;
++ break;
++ default:
++ return err;
++ }
++ }
++
++ return -EINVAL;
++}
++
+ #ifdef CONFIG_ROOT_NFS
+
+ #define NFSROOT_TIMEOUT_MIN 5
+@@ -567,6 +588,10 @@ void __init mount_root(void)
+ return;
+ }
+ #endif
++#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
++ if (!mount_ubi_rootfs())
++ return;
++#endif
+ #ifdef CONFIG_BLOCK
+ {
+ int err = create_dev("/dev/root", ROOT_DEV);
diff --git a/target/linux/generic/pending-5.10/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch b/target/linux/generic/pending-5.10/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch
new file mode 100644
index 0000000000..2dff46807e
--- /dev/null
+++ b/target/linux/generic/pending-5.10/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch
@@ -0,0 +1,34 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: ubi: set ROOT_DEV to ubiblock "rootfs" if unset
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/mtd/ubi/block.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -42,6 +42,7 @@
+ #include <linux/scatterlist.h>
+ #include <linux/idr.h>
+ #include <asm/div64.h>
++#include <linux/root_dev.h>
+
+ #include "ubi-media.h"
+ #include "ubi.h"
+@@ -458,6 +459,15 @@ int ubiblock_create(struct ubi_volume_in
+ dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
+ dev->ubi_num, dev->vol_id, vi->name);
+ mutex_unlock(&devices_mutex);
++
++ if (!strcmp(vi->name, "rootfs") &&
++ IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
++ ROOT_DEV == 0) {
++ pr_notice("ubiblock: device ubiblock%d_%d (%s) set to be root filesystem\n",
++ dev->ubi_num, dev->vol_id, vi->name);
++ ROOT_DEV = MKDEV(gd->major, gd->first_minor);
++ }
++
+ return 0;
+
+ out_free_queue:
diff --git a/target/linux/generic/pending-5.10/494-mtd-ubi-add-EOF-marker-support.patch b/target/linux/generic/pending-5.10/494-mtd-ubi-add-EOF-marker-support.patch
new file mode 100644
index 0000000000..81d32ca5a5
--- /dev/null
+++ b/target/linux/generic/pending-5.10/494-mtd-ubi-add-EOF-marker-support.patch
@@ -0,0 +1,60 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: mtd: add EOF marker support to the UBI layer
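+
+For illustration (not part of this patch): an image generator can terminate
+a UBI image early by writing an otherwise normal erase-counter header whose
+padding1 bytes spell "EOF"; scan_peb() then treats that PEB and all PEBs
+after it as empty:
+
+    struct ubi_ec_hdr ech = { /* regular EC header fields */ };
+
+    memcpy(ech.padding1, "EOF", 3); /* matched by ec_hdr_has_eof() */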
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/mtd/ubi/attach.c | 25 ++++++++++++++++++++++---
+ drivers/mtd/ubi/ubi.h | 1 +
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -926,6 +926,13 @@ static bool vol_ignored(int vol_id)
+ #endif
+ }
+
++static bool ec_hdr_has_eof(struct ubi_ec_hdr *ech)
++{
++ return ech->padding1[0] == 'E' &&
++ ech->padding1[1] == 'O' &&
++ ech->padding1[2] == 'F';
++}
++
+ /**
+ * scan_peb - scan and process UBI headers of a PEB.
+ * @ubi: UBI device description object
+@@ -958,9 +965,21 @@ static int scan_peb(struct ubi_device *u
+ return 0;
+ }
+
+- err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+- if (err < 0)
+- return err;
++ if (!ai->eof_found) {
++ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
++ if (err < 0)
++ return err;
++
++ if (ec_hdr_has_eof(ech)) {
++ pr_notice("UBI: EOF marker found, PEBs from %d will be erased\n",
++ pnum);
++ ai->eof_found = true;
++ }
++ }
++
++ if (ai->eof_found)
++ err = UBI_IO_FF_BITFLIPS;
++
+ switch (err) {
+ case 0:
+ break;
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -782,6 +782,7 @@ struct ubi_attach_info {
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
++ bool eof_found;
+ struct kmem_cache *aeb_slab_cache;
+ struct ubi_ec_hdr *ech;
+ struct ubi_vid_io_buf *vidb;
diff --git a/target/linux/generic/pending-5.10/495-mtd-core-add-get_mtd_device_by_node.patch b/target/linux/generic/pending-5.10/495-mtd-core-add-get_mtd_device_by_node.patch
new file mode 100644
index 0000000000..98c5c2ef19
--- /dev/null
+++ b/target/linux/generic/pending-5.10/495-mtd-core-add-get_mtd_device_by_node.patch
@@ -0,0 +1,75 @@
+From 1bd1b740f208d1cf4071932cc51860d37266c402 Mon Sep 17 00:00:00 2001
+From: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+Date: Sat, 1 Sep 2018 00:30:11 +0200
+Subject: [PATCH 495/497] mtd: core: add get_mtd_device_by_node
+
+Add function to retrieve a mtd device by its OF node. Since drivers can
+assign arbitrary names to mtd devices in the absence of a label
+property, there is no other reliable way to retrieve a mtd device for a
+given OF node.
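+
+A minimal caller sketch (illustration only; the mtd-concat driver added
+later in this series follows the same pattern):
+
+    struct mtd_info *mtd = get_mtd_device_by_node(np);
+
+    if (IS_ERR(mtd))
+        return -EPROBE_DEFER; /* the MTD device may not be registered yet */
+    /* ... use mtd, then drop the reference ... */
+    put_mtd_device(mtd);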
+
+Signed-off-by: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+Reviewed-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/mtdcore.c | 38 ++++++++++++++++++++++++++++++++++++++
+ include/linux/mtd/mtd.h | 2 ++
+ 2 files changed, 40 insertions(+)
+
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -1052,6 +1052,44 @@ out_unlock:
+ }
+ EXPORT_SYMBOL_GPL(get_mtd_device_nm);
+
++/**
++ * get_mtd_device_by_node - obtain a validated handle for an MTD device
++ * by of_node
++ * @of_node: OF node of MTD device to open
++ *
++ * This function returns MTD device description structure in case of
++ * success and an error code in case of failure.
++ */
++struct mtd_info *get_mtd_device_by_node(const struct device_node *of_node)
++{
++ int err = -ENODEV;
++ struct mtd_info *mtd = NULL, *other;
++
++ mutex_lock(&mtd_table_mutex);
++
++ mtd_for_each_device(other) {
++ if (of_node == other->dev.of_node) {
++ mtd = other;
++ break;
++ }
++ }
++
++ if (!mtd)
++ goto out_unlock;
++
++ err = __get_mtd_device(mtd);
++ if (err)
++ goto out_unlock;
++
++ mutex_unlock(&mtd_table_mutex);
++ return mtd;
++
++out_unlock:
++ mutex_unlock(&mtd_table_mutex);
++ return ERR_PTR(err);
++}
++EXPORT_SYMBOL_GPL(get_mtd_device_by_node);
++
+ void put_mtd_device(struct mtd_info *mtd)
+ {
+ mutex_lock(&mtd_table_mutex);
+--- a/include/linux/mtd/mtd.h
++++ b/include/linux/mtd/mtd.h
+@@ -696,6 +696,8 @@ extern struct mtd_info *get_mtd_device(s
+ extern int __get_mtd_device(struct mtd_info *mtd);
+ extern void __put_mtd_device(struct mtd_info *mtd);
+ extern struct mtd_info *get_mtd_device_nm(const char *name);
++extern struct mtd_info *get_mtd_device_by_node(
++ const struct device_node *of_node);
+ extern void put_mtd_device(struct mtd_info *mtd);
+
+ static inline uint64_t mtdpart_get_offset(const struct mtd_info *mtd)
diff --git a/target/linux/generic/pending-5.10/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch b/target/linux/generic/pending-5.10/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch
new file mode 100644
index 0000000000..01f3b9ec2d
--- /dev/null
+++ b/target/linux/generic/pending-5.10/496-dt-bindings-add-bindings-for-mtd-concat-devices.patch
@@ -0,0 +1,52 @@
+From 5734c6669fba7ddb5ef491ccff7159d15dba0b59 Mon Sep 17 00:00:00 2001
+From: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+Date: Wed, 5 Sep 2018 01:32:51 +0200
+Subject: [PATCH 496/497] dt-bindings: add bindings for mtd-concat devices
+
+Document virtual mtd-concat device bindings.
+
+Signed-off-by: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+---
+ .../devicetree/bindings/mtd/mtd-concat.txt | 36 +++++++++++++++++++
+ 1 file changed, 36 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/mtd/mtd-concat.txt
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/mtd/mtd-concat.txt
+@@ -0,0 +1,36 @@
++Virtual MTD concat device
++
++Required properties:
++- devices: list of phandles to mtd nodes that should be concatenated
++
++Example:
++
++&spi {
++ flash0: flash@0 {
++ ...
++ };
++ flash1: flash@1 {
++ ...
++ };
++};
++
++flash {
++ compatible = "mtd-concat";
++
++ devices = <&flash0 &flash1>;
++
++ partitions {
++ compatible = "fixed-partitions";
++
++ partition@0 {
++ label = "boot";
++ reg = <0x0000000 0x0040000>;
++ read-only;
++ };
++
++ partition@40000 {
++ label = "firmware";
++ reg = <0x0040000 0x1fc0000>;
++ };
++	};
++};
diff --git a/target/linux/generic/pending-5.10/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch b/target/linux/generic/pending-5.10/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch
new file mode 100644
index 0000000000..058cab09e5
--- /dev/null
+++ b/target/linux/generic/pending-5.10/497-mtd-mtdconcat-add-dt-driver-for-concat-devices.patch
@@ -0,0 +1,216 @@
+From e53f712d8eac71f54399b61038ccf87d2cee99d7 Mon Sep 17 00:00:00 2001
+From: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+Date: Sat, 25 Aug 2018 12:35:22 +0200
+Subject: [PATCH 497/497] mtd: mtdconcat: add dt driver for concat devices
+
+Some mtd drivers like physmap variants have support for concatenating
+multiple mtd devices, but there is no generic way to define such a
+concat device from within the device tree.
+
+This is useful for some SoC boards that use multiple flash chips as
+memory banks of a single mtd device, with partitions spanning chip
+borders.
+
+This commit adds a driver for creating virtual mtd-concat devices. They
+must have a compatible = "mtd-concat" line, and define a list of devices
+to concat in the 'devices' property, for example:
+
+flash {
+ compatible = "mtd-concat";
+
+ devices = <&flash0 &flash1>;
+
+ partitions {
+ ...
+ };
+};
+
+The driver is added to the very end of the mtd Makefile to increase the
+likelihood of all child devices already being loaded at the time of
+probing, preventing unnecessary deferred probes.
+
+Signed-off-by: Bernhard Frauendienst <kernel@nospam.obeliks.de>
+---
+ drivers/mtd/Kconfig | 2 +
+ drivers/mtd/Makefile | 3 +
+ drivers/mtd/composite/Kconfig | 12 +++
+ drivers/mtd/composite/Makefile | 6 ++
+ drivers/mtd/composite/virt_concat.c | 128 ++++++++++++++++++++++++++++
+ 5 files changed, 151 insertions(+)
+ create mode 100644 drivers/mtd/composite/Kconfig
+ create mode 100644 drivers/mtd/composite/Makefile
+ create mode 100644 drivers/mtd/composite/virt_concat.c
+
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -238,4 +238,6 @@ source "drivers/mtd/ubi/Kconfig"
+
+ source "drivers/mtd/hyperbus/Kconfig"
+
++source "drivers/mtd/composite/Kconfig"
++
+ endif # MTD
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -33,3 +33,6 @@ obj-y += chips/ lpddr/ maps/ devices/ n
+ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/
+ obj-$(CONFIG_MTD_UBI) += ubi/
+ obj-$(CONFIG_MTD_HYPERBUS) += hyperbus/
++
++# Composite drivers must be loaded last
++obj-y += composite/
+--- /dev/null
++++ b/drivers/mtd/composite/Kconfig
+@@ -0,0 +1,12 @@
++menu "Composite MTD device drivers"
++ depends on MTD!=n
++
++config MTD_VIRT_CONCAT
++ tristate "Virtual concat MTD device"
++ help
++ This driver allows creation of a virtual MTD concat device, which
++	  concatenates multiple underlying MTD devices into a single device.
++ This is required by some SoC boards where multiple memory banks are
++	  used as one device, with partitions spanning device boundaries.
++
++endmenu
+--- /dev/null
++++ b/drivers/mtd/composite/Makefile
+@@ -0,0 +1,6 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# linux/drivers/mtd/composite/Makefile
++#
++
++obj-$(CONFIG_MTD_VIRT_CONCAT) += virt_concat.o
+--- /dev/null
++++ b/drivers/mtd/composite/virt_concat.c
+@@ -0,0 +1,128 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Virtual concat MTD device driver
++ *
++ * Copyright (C) 2018 Bernhard Frauendienst
++ * Author: Bernhard Frauendienst, kernel@nospam.obeliks.de
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/mtd/concat.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/slab.h>
++
++/*
++ * struct of_virt_concat - platform device driver data.
++ * @cmtd: the final mtd_concat device
++ * @num_devices: the number of devices in @devices
++ * @devices: points to an array of devices already loaded
++ */
++struct of_virt_concat {
++ struct mtd_info *cmtd;
++ int num_devices;
++ struct mtd_info **devices;
++};
++
++static int virt_concat_remove(struct platform_device *pdev)
++{
++ struct of_virt_concat *info;
++ int i;
++
++ info = platform_get_drvdata(pdev);
++ if (!info)
++ return 0;
++
++ // unset data for when this is called after a probe error
++ platform_set_drvdata(pdev, NULL);
++
++ if (info->cmtd) {
++ mtd_device_unregister(info->cmtd);
++ mtd_concat_destroy(info->cmtd);
++ }
++
++ if (info->devices) {
++ for (i = 0; i < info->num_devices; i++)
++ put_mtd_device(info->devices[i]);
++ }
++
++ return 0;
++}
++
++static int virt_concat_probe(struct platform_device *pdev)
++{
++ struct device_node *node = pdev->dev.of_node;
++ struct of_phandle_iterator it;
++ struct of_virt_concat *info;
++ struct mtd_info *mtd;
++ int err = 0, count;
++
++ count = of_count_phandle_with_args(node, "devices", NULL);
++ if (count <= 0)
++ return -EINVAL;
++
++ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
++ info->devices = devm_kcalloc(&pdev->dev, count,
++ sizeof(*(info->devices)), GFP_KERNEL);
++ if (!info->devices) {
++ err = -ENOMEM;
++ goto err_remove;
++ }
++
++ platform_set_drvdata(pdev, info);
++
++ of_for_each_phandle(&it, err, node, "devices", NULL, 0) {
++ mtd = get_mtd_device_by_node(it.node);
++ if (IS_ERR(mtd)) {
++ of_node_put(it.node);
++ err = -EPROBE_DEFER;
++ goto err_remove;
++ }
++
++ info->devices[info->num_devices++] = mtd;
++ }
++
++ info->cmtd = mtd_concat_create(info->devices, info->num_devices,
++ dev_name(&pdev->dev));
++ if (!info->cmtd) {
++ err = -ENXIO;
++ goto err_remove;
++ }
++
++ info->cmtd->dev.parent = &pdev->dev;
++ mtd_set_of_node(info->cmtd, node);
++ mtd_device_register(info->cmtd, NULL, 0);
++
++ return 0;
++
++err_remove:
++ virt_concat_remove(pdev);
++
++ return err;
++}
++
++static const struct of_device_id virt_concat_of_match[] = {
++ { .compatible = "mtd-concat", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, virt_concat_of_match);
++
++static struct platform_driver virt_concat_driver = {
++ .probe = virt_concat_probe,
++ .remove = virt_concat_remove,
++ .driver = {
++ .name = "virt-mtdconcat",
++ .of_match_table = virt_concat_of_match,
++ },
++};
++
++module_platform_driver(virt_concat_driver);
++
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Bernhard Frauendienst <kernel@nospam.obeliks.de>");
++MODULE_DESCRIPTION("Virtual concat MTD device driver");
diff --git a/target/linux/generic/pending-5.10/498-mtd-mtdconcat-select-readwrite-function.patch b/target/linux/generic/pending-5.10/498-mtd-mtdconcat-select-readwrite-function.patch
new file mode 100644
index 0000000000..9aad0f3e6c
--- /dev/null
+++ b/target/linux/generic/pending-5.10/498-mtd-mtdconcat-select-readwrite-function.patch
@@ -0,0 +1,24 @@
+--- a/drivers/mtd/mtdconcat.c
++++ b/drivers/mtd/mtdconcat.c
+@@ -683,8 +683,12 @@ struct mtd_info *mtd_concat_create(struc
+ concat->mtd._writev = concat_writev;
+ if (subdev[0]->_read_oob)
+ concat->mtd._read_oob = concat_read_oob;
++ else
++ concat->mtd._read = concat_read;
+ if (subdev[0]->_write_oob)
+ concat->mtd._write_oob = concat_write_oob;
++ else
++ concat->mtd._write = concat_write;
+ if (subdev[0]->_block_isbad)
+ concat->mtd._block_isbad = concat_block_isbad;
+ if (subdev[0]->_block_markbad)
+@@ -744,8 +748,6 @@ struct mtd_info *mtd_concat_create(struc
+ concat->mtd.name = name;
+
+ concat->mtd._erase = concat_erase;
+- concat->mtd._read = concat_read;
+- concat->mtd._write = concat_write;
+ concat->mtd._sync = concat_sync;
+ concat->mtd._lock = concat_lock;
+ concat->mtd._unlock = concat_unlock;
diff --git a/target/linux/generic/pending-5.10/500-fs_cdrom_dependencies.patch b/target/linux/generic/pending-5.10/500-fs_cdrom_dependencies.patch
new file mode 100644
index 0000000000..0a5a3aae5d
--- /dev/null
+++ b/target/linux/generic/pending-5.10/500-fs_cdrom_dependencies.patch
@@ -0,0 +1,40 @@
+--- a/fs/hfs/Kconfig
++++ b/fs/hfs/Kconfig
+@@ -2,6 +2,7 @@
+ config HFS_FS
+ tristate "Apple Macintosh file system support"
+ depends on BLOCK
++ select CDROM
+ select NLS
+ help
+ If you say Y here, you will be able to mount Macintosh-formatted
+--- a/fs/hfsplus/Kconfig
++++ b/fs/hfsplus/Kconfig
+@@ -2,6 +2,7 @@
+ config HFSPLUS_FS
+ tristate "Apple Extended HFS file system support"
+ depends on BLOCK
++ select CDROM
+ select NLS
+ select NLS_UTF8
+ help
+--- a/fs/isofs/Kconfig
++++ b/fs/isofs/Kconfig
+@@ -1,6 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config ISO9660_FS
+ tristate "ISO 9660 CDROM file system support"
++ select CDROM
+ help
+ This is the standard file system used on CD-ROMs. It was previously
+ known as "High Sierra File System" and is called "hsfs" on other
+--- a/fs/udf/Kconfig
++++ b/fs/udf/Kconfig
+@@ -1,6 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config UDF_FS
+ tristate "UDF file system support"
++ select CDROM
+ select CRC_ITU_T
+ select NLS
+ help
diff --git a/target/linux/generic/pending-5.10/530-jffs2_make_lzma_available.patch b/target/linux/generic/pending-5.10/530-jffs2_make_lzma_available.patch
new file mode 100644
index 0000000000..1bccb30a69
--- /dev/null
+++ b/target/linux/generic/pending-5.10/530-jffs2_make_lzma_available.patch
@@ -0,0 +1,5180 @@
+From: Alexandros C. Couloumbis <alex@ozo.com>
+Subject: fs: add jffs2/lzma support (not activated by default yet)
+
+lede-commit: c2c88d315fa0e881f8b19da07b62859b915b11b2
+Signed-off-by: Alexandros C. Couloumbis <alex@ozo.com>
+---
+ fs/jffs2/Kconfig | 9 +
+ fs/jffs2/Makefile | 3 +
+ fs/jffs2/compr.c | 6 +
+ fs/jffs2/compr.h | 10 +-
+ fs/jffs2/compr_lzma.c | 128 +++
+ fs/jffs2/super.c | 33 +-
+ include/linux/lzma.h | 62 ++
+ include/linux/lzma/LzFind.h | 115 +++
+ include/linux/lzma/LzHash.h | 54 +
+ include/linux/lzma/LzmaDec.h | 231 +++++
+ include/linux/lzma/LzmaEnc.h | 80 ++
+ include/linux/lzma/Types.h | 226 +++++
+ include/uapi/linux/jffs2.h | 1 +
+ lib/Kconfig | 6 +
+ lib/Makefile | 12 +
+ lib/lzma/LzFind.c | 761 ++++++++++++++
+ lib/lzma/LzmaDec.c | 999 +++++++++++++++++++
+ lib/lzma/LzmaEnc.c | 2271 ++++++++++++++++++++++++++++++++++++++++++
+ lib/lzma/Makefile | 7 +
+ 19 files changed, 5008 insertions(+), 6 deletions(-)
+ create mode 100644 fs/jffs2/compr_lzma.c
+ create mode 100644 include/linux/lzma.h
+ create mode 100644 include/linux/lzma/LzFind.h
+ create mode 100644 include/linux/lzma/LzHash.h
+ create mode 100644 include/linux/lzma/LzmaDec.h
+ create mode 100644 include/linux/lzma/LzmaEnc.h
+ create mode 100644 include/linux/lzma/Types.h
+ create mode 100644 lib/lzma/LzFind.c
+ create mode 100644 lib/lzma/LzmaDec.c
+ create mode 100644 lib/lzma/LzmaEnc.c
+ create mode 100644 lib/lzma/Makefile
+
+--- a/fs/jffs2/Kconfig
++++ b/fs/jffs2/Kconfig
+@@ -136,6 +136,15 @@ config JFFS2_LZO
+ This feature was added in July, 2007. Say 'N' if you need
+ compatibility with older bootloaders or kernels.
+
++config JFFS2_LZMA
++ bool "JFFS2 LZMA compression support" if JFFS2_COMPRESSION_OPTIONS
++ select LZMA_COMPRESS
++ select LZMA_DECOMPRESS
++ depends on JFFS2_FS
++ default n
++ help
++ JFFS2 wrapper to the LZMA C SDK
++
+ config JFFS2_RTIME
+ bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS
+ depends on JFFS2_FS
+--- a/fs/jffs2/Makefile
++++ b/fs/jffs2/Makefile
+@@ -19,4 +19,7 @@ jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rub
+ jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
+ jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
+ jffs2-$(CONFIG_JFFS2_LZO) += compr_lzo.o
++jffs2-$(CONFIG_JFFS2_LZMA) += compr_lzma.o
+ jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o
++
++CFLAGS_compr_lzma.o += -Iinclude/linux -Ilib/lzma
+--- a/fs/jffs2/compr.c
++++ b/fs/jffs2/compr.c
+@@ -378,6 +378,9 @@ int __init jffs2_compressors_init(void)
+ #ifdef CONFIG_JFFS2_LZO
+ jffs2_lzo_init();
+ #endif
++#ifdef CONFIG_JFFS2_LZMA
++ jffs2_lzma_init();
++#endif
+ /* Setting default compression mode */
+ #ifdef CONFIG_JFFS2_CMODE_NONE
+ jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+@@ -401,6 +404,9 @@ int __init jffs2_compressors_init(void)
+ int jffs2_compressors_exit(void)
+ {
+ /* Unregistering compressors */
++#ifdef CONFIG_JFFS2_LZMA
++ jffs2_lzma_exit();
++#endif
+ #ifdef CONFIG_JFFS2_LZO
+ jffs2_lzo_exit();
+ #endif
+--- a/fs/jffs2/compr.h
++++ b/fs/jffs2/compr.h
+@@ -29,9 +29,9 @@
+ #define JFFS2_DYNRUBIN_PRIORITY 20
+ #define JFFS2_LZARI_PRIORITY 30
+ #define JFFS2_RTIME_PRIORITY 50
+-#define JFFS2_ZLIB_PRIORITY 60
+-#define JFFS2_LZO_PRIORITY 80
+-
++#define JFFS2_LZMA_PRIORITY 70
++#define JFFS2_ZLIB_PRIORITY 80
++#define JFFS2_LZO_PRIORITY 90
+
+ #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
+ #define JFFS2_DYNRUBIN_DISABLED /* for decompression */
+@@ -101,5 +101,9 @@ void jffs2_zlib_exit(void);
+ int jffs2_lzo_init(void);
+ void jffs2_lzo_exit(void);
+ #endif
++#ifdef CONFIG_JFFS2_LZMA
++int jffs2_lzma_init(void);
++void jffs2_lzma_exit(void);
++#endif
+
+ #endif /* __JFFS2_COMPR_H__ */
+--- /dev/null
++++ b/fs/jffs2/compr_lzma.c
+@@ -0,0 +1,128 @@
++/*
++ * JFFS2 -- Journalling Flash File System, Version 2.
++ *
++ * For licensing information, see the file 'LICENCE' in this directory.
++ *
++ * JFFS2 wrapper to the LZMA C SDK
++ *
++ */
++
++#include <linux/lzma.h>
++#include "compr.h"
++
++#ifdef __KERNEL__
++ static DEFINE_MUTEX(deflate_mutex);
++#endif
++
++CLzmaEncHandle *p;
++Byte propsEncoded[LZMA_PROPS_SIZE];
++SizeT propsSize = sizeof(propsEncoded);
++
++STATIC void lzma_free_workspace(void)
++{
++ LzmaEnc_Destroy(p, &lzma_alloc, &lzma_alloc);
++}
++
++STATIC int INIT lzma_alloc_workspace(CLzmaEncProps *props)
++{
++ if ((p = (CLzmaEncHandle *)LzmaEnc_Create(&lzma_alloc)) == NULL)
++ {
++ PRINT_ERROR("Failed to allocate lzma deflate workspace\n");
++ return -ENOMEM;
++ }
++
++ if (LzmaEnc_SetProps(p, props) != SZ_OK)
++ {
++ lzma_free_workspace();
++ return -1;
++ }
++
++ if (LzmaEnc_WriteProperties(p, propsEncoded, &propsSize) != SZ_OK)
++ {
++ lzma_free_workspace();
++ return -1;
++ }
++
++ return 0;
++}
++
++STATIC int jffs2_lzma_compress(unsigned char *data_in, unsigned char *cpage_out,
++ uint32_t *sourcelen, uint32_t *dstlen)
++{
++ SizeT compress_size = (SizeT)(*dstlen);
++ int ret;
++
++ #ifdef __KERNEL__
++ mutex_lock(&deflate_mutex);
++ #endif
++
++ ret = LzmaEnc_MemEncode(p, cpage_out, &compress_size, data_in, *sourcelen,
++ 0, NULL, &lzma_alloc, &lzma_alloc);
++
++ #ifdef __KERNEL__
++ mutex_unlock(&deflate_mutex);
++ #endif
++
++ if (ret != SZ_OK)
++ return -1;
++
++ *dstlen = (uint32_t)compress_size;
++
++ return 0;
++}
++
++STATIC int jffs2_lzma_decompress(unsigned char *data_in, unsigned char *cpage_out,
++ uint32_t srclen, uint32_t destlen)
++{
++ int ret;
++ SizeT dl = (SizeT)destlen;
++ SizeT sl = (SizeT)srclen;
++ ELzmaStatus status;
++
++ ret = LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded,
++ propsSize, LZMA_FINISH_ANY, &status, &lzma_alloc);
++
++ if (ret != SZ_OK || status == LZMA_STATUS_NOT_FINISHED || dl != (SizeT)destlen)
++ return -1;
++
++ return 0;
++}
++
++static struct jffs2_compressor jffs2_lzma_comp = {
++ .priority = JFFS2_LZMA_PRIORITY,
++ .name = "lzma",
++ .compr = JFFS2_COMPR_LZMA,
++ .compress = &jffs2_lzma_compress,
++ .decompress = &jffs2_lzma_decompress,
++ .disabled = 0,
++};
++
++int INIT jffs2_lzma_init(void)
++{
++ int ret;
++ CLzmaEncProps props;
++ LzmaEncProps_Init(&props);
++
++ props.dictSize = LZMA_BEST_DICT(0x2000);
++ props.level = LZMA_BEST_LEVEL;
++ props.lc = LZMA_BEST_LC;
++ props.lp = LZMA_BEST_LP;
++ props.pb = LZMA_BEST_PB;
++ props.fb = LZMA_BEST_FB;
++
++ ret = lzma_alloc_workspace(&props);
++ if (ret < 0)
++ return ret;
++
++ ret = jffs2_register_compressor(&jffs2_lzma_comp);
++ if (ret)
++ lzma_free_workspace();
++
++ return ret;
++}
++
++void jffs2_lzma_exit(void)
++{
++ jffs2_unregister_compressor(&jffs2_lzma_comp);
++ lzma_free_workspace();
++}
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -374,14 +374,41 @@ static int __init init_jffs2_fs(void)
+ BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
+ BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
+
+- pr_info("version 2.2."
++ pr_info("version 2.2"
+ #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ " (NAND)"
+ #endif
+ #ifdef CONFIG_JFFS2_SUMMARY
+- " (SUMMARY) "
++ " (SUMMARY)"
+ #endif
+- " © 2001-2006 Red Hat, Inc.\n");
++#ifdef CONFIG_JFFS2_ZLIB
++ " (ZLIB)"
++#endif
++#ifdef CONFIG_JFFS2_LZO
++ " (LZO)"
++#endif
++#ifdef CONFIG_JFFS2_LZMA
++ " (LZMA)"
++#endif
++#ifdef CONFIG_JFFS2_RTIME
++ " (RTIME)"
++#endif
++#ifdef CONFIG_JFFS2_RUBIN
++ " (RUBIN)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_NONE
++ " (CMODE_NONE)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_PRIORITY
++ " (CMODE_PRIORITY)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_SIZE
++ " (CMODE_SIZE)"
++#endif
++#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
++ " (CMODE_FAVOURLZO)"
++#endif
++ " (c) 2001-2006 Red Hat, Inc.\n");
+
+ jffs2_inode_cachep = kmem_cache_create("jffs2_i",
+ sizeof(struct jffs2_inode_info),
+--- /dev/null
++++ b/include/linux/lzma.h
+@@ -0,0 +1,62 @@
++#ifndef __LZMA_H__
++#define __LZMA_H__
++
++#ifdef __KERNEL__
++ #include <linux/kernel.h>
++ #include <linux/sched.h>
++ #include <linux/slab.h>
++ #include <linux/vmalloc.h>
++ #include <linux/init.h>
++ #define LZMA_MALLOC vmalloc
++ #define LZMA_FREE vfree
++ #define PRINT_ERROR(msg) printk(KERN_WARNING #msg)
++ #define INIT __init
++ #define STATIC static
++#else
++ #include <stdint.h>
++ #include <stdlib.h>
++ #include <stdio.h>
++ #include <unistd.h>
++ #include <string.h>
++ #include <asm/types.h>
++ #include <errno.h>
++ #include <linux/jffs2.h>
++ #ifndef PAGE_SIZE
++ extern int page_size;
++ #define PAGE_SIZE page_size
++ #endif
++ #define LZMA_MALLOC malloc
++ #define LZMA_FREE free
++ #define PRINT_ERROR(msg) fprintf(stderr, msg)
++ #define INIT
++ #define STATIC
++#endif
++
++#include "lzma/LzmaDec.h"
++#include "lzma/LzmaEnc.h"
++
++#define LZMA_BEST_LEVEL (9)
++#define LZMA_BEST_LC (0)
++#define LZMA_BEST_LP (0)
++#define LZMA_BEST_PB (0)
++#define LZMA_BEST_FB (273)
++
++#define LZMA_BEST_DICT(n) (((int)((n) / 2)) * 2)
++
++static void *p_lzma_malloc(void *p, size_t size)
++{
++ if (size == 0)
++ return NULL;
++
++ return LZMA_MALLOC(size);
++}
++
++static void p_lzma_free(void *p, void *address)
++{
++ if (address != NULL)
++ LZMA_FREE(address);
++}
++
++static ISzAlloc lzma_alloc = {p_lzma_malloc, p_lzma_free};
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzFind.h
+@@ -0,0 +1,115 @@
++/* LzFind.h -- Match finder for LZ algorithms
++2009-04-22 : Igor Pavlov : Public domain */
++
++#ifndef __LZ_FIND_H
++#define __LZ_FIND_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++typedef UInt32 CLzRef;
++
++typedef struct _CMatchFinder
++{
++ Byte *buffer;
++ UInt32 pos;
++ UInt32 posLimit;
++ UInt32 streamPos;
++ UInt32 lenLimit;
++
++ UInt32 cyclicBufferPos;
++ UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
++
++ UInt32 matchMaxLen;
++ CLzRef *hash;
++ CLzRef *son;
++ UInt32 hashMask;
++ UInt32 cutValue;
++
++ Byte *bufferBase;
++ ISeqInStream *stream;
++ int streamEndWasReached;
++
++ UInt32 blockSize;
++ UInt32 keepSizeBefore;
++ UInt32 keepSizeAfter;
++
++ UInt32 numHashBytes;
++ int directInput;
++ size_t directInputRem;
++ int btMode;
++ int bigHash;
++ UInt32 historySize;
++ UInt32 fixedHashSize;
++ UInt32 hashSizeSum;
++ UInt32 numSons;
++ SRes result;
++ UInt32 crc[256];
++} CMatchFinder;
++
++#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)
++#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)])
++
++#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
++
++int MatchFinder_NeedMove(CMatchFinder *p);
++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p);
++void MatchFinder_MoveBlock(CMatchFinder *p);
++void MatchFinder_ReadIfRequired(CMatchFinder *p);
++
++void MatchFinder_Construct(CMatchFinder *p);
++
++/* Conditions:
++ historySize <= 3 GB
++ keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB
++*/
++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
++ ISzAlloc *alloc);
++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);
++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems);
++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
++
++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
++ UInt32 *distances, UInt32 maxLen);
++
++/*
++Conditions:
++ Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
++ Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
++*/
++
++typedef void (*Mf_Init_Func)(void *object);
++typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index);
++typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
++typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
++typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
++typedef void (*Mf_Skip_Func)(void *object, UInt32);
++
++typedef struct _IMatchFinder
++{
++ Mf_Init_Func Init;
++ Mf_GetIndexByte_Func GetIndexByte;
++ Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
++ Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
++ Mf_GetMatches_Func GetMatches;
++ Mf_Skip_Func Skip;
++} IMatchFinder;
++
++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
++
++void MatchFinder_Init(CMatchFinder *p);
++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzHash.h
+@@ -0,0 +1,54 @@
++/* LzHash.h -- HASH functions for LZ algorithms
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZ_HASH_H
++#define __LZ_HASH_H
++
++#define kHash2Size (1 << 10)
++#define kHash3Size (1 << 16)
++#define kHash4Size (1 << 20)
++
++#define kFix3HashSize (kHash2Size)
++#define kFix4HashSize (kHash2Size + kHash3Size)
++#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
++
++#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8);
++
++#define HASH3_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
++
++#define HASH4_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; }
++
++#define HASH5_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \
++ hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \
++ hash4Value &= (kHash4Size - 1); }
++
++/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */
++#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
++
++
++#define MT_HASH2_CALC \
++ hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1);
++
++#define MT_HASH3_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
++
++#define MT_HASH4_CALC { \
++ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
++ hash2Value = temp & (kHash2Size - 1); \
++ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
++ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); }
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzmaDec.h
+@@ -0,0 +1,231 @@
++/* LzmaDec.h -- LZMA Decoder
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZMA_DEC_H
++#define __LZMA_DEC_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* #define _LZMA_PROB32 */
++/* _LZMA_PROB32 can increase the speed on some CPUs,
++ but memory usage for CLzmaDec::probs will be doubled in that case */
++
++#ifdef _LZMA_PROB32
++#define CLzmaProb UInt32
++#else
++#define CLzmaProb UInt16
++#endif
++
++
++/* ---------- LZMA Properties ---------- */
++
++#define LZMA_PROPS_SIZE 5
++
++typedef struct _CLzmaProps
++{
++ unsigned lc, lp, pb;
++ UInt32 dicSize;
++} CLzmaProps;
++
++/* LzmaProps_Decode - decodes properties
++Returns:
++ SZ_OK
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++*/
++
++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
++
++
++/* ---------- LZMA Decoder state ---------- */
++
++/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case.
++ Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */
++
++#define LZMA_REQUIRED_INPUT_MAX 20
++
++typedef struct
++{
++ CLzmaProps prop;
++ CLzmaProb *probs;
++ Byte *dic;
++ const Byte *buf;
++ UInt32 range, code;
++ SizeT dicPos;
++ SizeT dicBufSize;
++ UInt32 processedPos;
++ UInt32 checkDicSize;
++ unsigned state;
++ UInt32 reps[4];
++ unsigned remainLen;
++ int needFlush;
++ int needInitState;
++ UInt32 numProbs;
++ unsigned tempBufSize;
++ Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
++} CLzmaDec;
++
++#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }
++
++void LzmaDec_Init(CLzmaDec *p);
++
++/* There are two types of LZMA streams:
++ 0) Stream with end mark. That end mark adds about 6 bytes to compressed size.
++ 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */
++
++typedef enum
++{
++ LZMA_FINISH_ANY, /* finish at any point */
++ LZMA_FINISH_END /* block must be finished at the end */
++} ELzmaFinishMode;
++
++/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!!
++
++ You must use LZMA_FINISH_END, when you know that current output buffer
++ covers last bytes of block. In other cases you must use LZMA_FINISH_ANY.
++
++ If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK,
++ and output value of destLen will be less than output buffer size limit.
++ You can check status result also.
++
++ You can use multiple checks to test data integrity after full decompression:
++ 1) Check Result and "status" variable.
++ 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize.
++ 3) Check that output(srcLen) = compressedSize, if you know real compressedSize.
++ You must use correct finish mode in that case. */
++
++typedef enum
++{
++ LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */
++ LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
++ LZMA_STATUS_NOT_FINISHED, /* stream was not finished */
++ LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */
++} ELzmaStatus;
++
++/* ELzmaStatus is used only as output value for function call */
++
++
++/* ---------- Interfaces ---------- */
++
++/* There are 3 levels of interfaces:
++ 1) Dictionary Interface
++ 2) Buffer Interface
++ 3) One Call Interface
++ You can select any of these interfaces, but don't mix functions from different
++ groups for same object. */
++
++
++/* There are two variants to allocate state for Dictionary Interface:
++ 1) LzmaDec_Allocate / LzmaDec_Free
++ 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
++ You can use variant 2, if you set dictionary buffer manually.
++ For Buffer Interface you must always use variant 1.
++
++LzmaDec_Allocate* can return:
++ SZ_OK
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++*/
++
++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc);
++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc);
++
++SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc);
++void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc);
++
++/* ---------- Dictionary Interface ---------- */
++
++/* You can use it, if you want to eliminate the overhead for data copying from
++ dictionary to some other external buffer.
++ You must work with CLzmaDec variables directly in this interface.
++
++ STEPS:
++ LzmaDec_Constr()
++ LzmaDec_Allocate()
++ for (each new stream)
++ {
++ LzmaDec_Init()
++ while (it needs more decompression)
++ {
++ LzmaDec_DecodeToDic()
++ use data from CLzmaDec::dic and update CLzmaDec::dicPos
++ }
++ }
++ LzmaDec_Free()
++*/
++
++/* LzmaDec_DecodeToDic
++
++ The decoding to internal dictionary buffer (CLzmaDec::dic).
++ You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!!
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (dicLimit).
++ LZMA_FINISH_ANY - Decode just dicLimit bytes.
++ LZMA_FINISH_END - Stream must be finished after dicLimit.
++
++Returns:
++ SZ_OK
++ status:
++ LZMA_STATUS_FINISHED_WITH_MARK
++ LZMA_STATUS_NOT_FINISHED
++ LZMA_STATUS_NEEDS_MORE_INPUT
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
++ SZ_ERROR_DATA - Data error
++*/
++
++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
++
++
++/* ---------- Buffer Interface ---------- */
++
++/* It's zlib-like interface.
++ See LzmaDec_DecodeToDic description for information about STEPS and return results,
++ but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need
++ to work with CLzmaDec variables manually.
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (*destLen).
++ LZMA_FINISH_ANY - Decode just destLen bytes.
++ LZMA_FINISH_END - Stream must be finished after (*destLen).
++*/
++
++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
++ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
++
++
++/* ---------- One Call Interface ---------- */
++
++/* LzmaDecode
++
++finishMode:
++ It has meaning only if the decoding reaches output limit (*destLen).
++ LZMA_FINISH_ANY - Decode just destLen bytes.
++ LZMA_FINISH_END - Stream must be finished after (*destLen).
++
++Returns:
++ SZ_OK
++ status:
++ LZMA_STATUS_FINISHED_WITH_MARK
++ LZMA_STATUS_NOT_FINISHED
++ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
++ SZ_ERROR_DATA - Data error
++ SZ_ERROR_MEM - Memory allocation error
++ SZ_ERROR_UNSUPPORTED - Unsupported properties
++ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
++*/
++
++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
++ ELzmaStatus *status, ISzAlloc *alloc);
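++
++/* Usage sketch (illustration only, mirroring fs/jffs2/compr_lzma.c above):
++
++	SizeT dl = destlen, sl = srclen;
++	ELzmaStatus status;
++
++	if (LzmaDecode(cpage_out, &dl, data_in, &sl, propsEncoded, propsSize,
++		       LZMA_FINISH_ANY, &status, &lzma_alloc) != SZ_OK)
++		return -1;
++*/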
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/LzmaEnc.h
+@@ -0,0 +1,80 @@
++/* LzmaEnc.h -- LZMA Encoder
++2009-02-07 : Igor Pavlov : Public domain */
++
++#ifndef __LZMA_ENC_H
++#define __LZMA_ENC_H
++
++#include "Types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define LZMA_PROPS_SIZE 5
++
++typedef struct _CLzmaEncProps
++{
++ int level; /* 0 <= level <= 9 */
++ UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
++ (1 << 12) <= dictSize <= (1 << 30) for 64-bit version
++ default = (1 << 24) */
++ int lc; /* 0 <= lc <= 8, default = 3 */
++ int lp; /* 0 <= lp <= 4, default = 0 */
++ int pb; /* 0 <= pb <= 4, default = 2 */
++ int algo; /* 0 - fast, 1 - normal, default = 1 */
++ int fb; /* 5 <= fb <= 273, default = 32 */
++  int btMode;       /* 0 - hash chain mode, 1 - binary tree mode (normal), default = 1 */
++ int numHashBytes; /* 2, 3 or 4, default = 4 */
++ UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */
++ unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
++ int numThreads; /* 1 or 2, default = 2 */
++} CLzmaEncProps;
++
++void LzmaEncProps_Init(CLzmaEncProps *p);
++void LzmaEncProps_Normalize(CLzmaEncProps *p);
++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
++
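++/* Usage sketch (editor's illustration): every field left at its
++   Init() value of -1 (or 0 for dictSize/mc) is filled in by
++   LzmaEncProps_Normalize() from the chosen level, so only the
++   deviations from the defaults need to be spelled out. */
++static void LzmaEncProps_ExampleSetup(CLzmaEncProps *props)
++{
++  LzmaEncProps_Init(props);       /* level = 5, the rest "auto" */
++  props->level = 9;
++  props->dictSize = 1 << 20;      /* cap the window at 1 MiB */
++  LzmaEncProps_Normalize(props);  /* resolves lc/lp/pb, fb, btMode, ... */
++}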
++
++/* ---------- CLzmaEncHandle Interface ---------- */
++
++/* LzmaEnc_* functions can return the following exit codes:
++Returns:
++ SZ_OK - OK
++ SZ_ERROR_MEM - Memory allocation error
++  SZ_ERROR_PARAM - Incorrect parameter in props
++  SZ_ERROR_WRITE - Write callback error.
++  SZ_ERROR_PROGRESS - the progress callback requested a break
++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
++*/
++
++typedef void * CLzmaEncHandle;
++
++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc);
++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig);
++SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
++SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
++SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++
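++/* Usage sketch (editor's illustration): the handle interface broken
++   into its canonical steps. propsEncoded receives the LZMA_PROPS_SIZE
++   header bytes the decoder will need; on entry *propsSize must hold
++   the size of that buffer. Progress reporting is skipped (NULL). */
++static SRes LzmaEnc_ExampleMemEncode(Byte *dest, SizeT *destLen,
++    const Byte *src, SizeT srcLen, const CLzmaEncProps *props,
++    Byte *propsEncoded, SizeT *propsSize, ISzAlloc *alloc)
++{
++  CLzmaEncHandle enc = LzmaEnc_Create(alloc);
++  SRes res;
++  if (enc == 0)
++    return SZ_ERROR_MEM;
++  res = LzmaEnc_SetProps(enc, props);
++  if (res == SZ_OK)
++    res = LzmaEnc_WriteProperties(enc, propsEncoded, propsSize);
++  if (res == SZ_OK)
++    res = LzmaEnc_MemEncode(enc, dest, destLen, src, srcLen,
++        0 /* writeEndMark */, NULL, alloc, alloc);
++  LzmaEnc_Destroy(enc, alloc, alloc);
++  return res;
++}
++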
++/* ---------- One Call Interface ---------- */
++
++/* LzmaEncode
++Return code:
++ SZ_OK - OK
++ SZ_ERROR_MEM - Memory allocation error
++  SZ_ERROR_PARAM - Incorrect parameter
++ SZ_ERROR_OUTPUT_EOF - output buffer overflow
++ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
++*/
++
++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
++
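++/* Usage sketch (editor's illustration): the one-call form wraps the
++   whole Create/SetProps/WriteProperties/Encode/Destroy sequence
++   above; a real caller would store the "header" bytes next to the
++   compressed data so the decoder can read them back. */
++static SRes LzmaEncode_ExampleOneCall(Byte *dest, SizeT *destLen,
++    const Byte *src, SizeT srcLen, ISzAlloc *alloc)
++{
++  CLzmaEncProps props;
++  Byte header[LZMA_PROPS_SIZE];
++  SizeT headerSize = LZMA_PROPS_SIZE;
++  LzmaEncProps_Init(&props);
++  return LzmaEncode(dest, destLen, src, srcLen, &props, header,
++      &headerSize, 0 /* writeEndMark */, NULL, alloc, alloc);
++}
++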
++#ifdef __cplusplus
++}
++#endif
++
++#endif
+--- /dev/null
++++ b/include/linux/lzma/Types.h
+@@ -0,0 +1,226 @@
++/* Types.h -- Basic types
++2009-11-23 : Igor Pavlov : Public domain */
++
++#ifndef __7Z_TYPES_H
++#define __7Z_TYPES_H
++
++#include <stddef.h>
++
++#ifdef _WIN32
++#include <windows.h>
++#endif
++
++#ifndef EXTERN_C_BEGIN
++#ifdef __cplusplus
++#define EXTERN_C_BEGIN extern "C" {
++#define EXTERN_C_END }
++#else
++#define EXTERN_C_BEGIN
++#define EXTERN_C_END
++#endif
++#endif
++
++EXTERN_C_BEGIN
++
++#define SZ_OK 0
++
++#define SZ_ERROR_DATA 1
++#define SZ_ERROR_MEM 2
++#define SZ_ERROR_CRC 3
++#define SZ_ERROR_UNSUPPORTED 4
++#define SZ_ERROR_PARAM 5
++#define SZ_ERROR_INPUT_EOF 6
++#define SZ_ERROR_OUTPUT_EOF 7
++#define SZ_ERROR_READ 8
++#define SZ_ERROR_WRITE 9
++#define SZ_ERROR_PROGRESS 10
++#define SZ_ERROR_FAIL 11
++#define SZ_ERROR_THREAD 12
++
++#define SZ_ERROR_ARCHIVE 16
++#define SZ_ERROR_NO_ARCHIVE 17
++
++typedef int SRes;
++
++#ifdef _WIN32
++typedef DWORD WRes;
++#else
++typedef int WRes;
++#endif
++
++#ifndef RINOK
++#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
++#endif
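++
++/* Usage note (editor's illustration): RINOK ("Return If Not OK")
++   propagates the first failing SRes to the caller, so a sequence like
++     RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++   expands to an early return whenever the call fails. */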
++
++typedef unsigned char Byte;
++typedef short Int16;
++typedef unsigned short UInt16;
++
++#ifdef _LZMA_UINT32_IS_ULONG
++typedef long Int32;
++typedef unsigned long UInt32;
++#else
++typedef int Int32;
++typedef unsigned int UInt32;
++#endif
++
++#ifdef _SZ_NO_INT_64
++
++/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers.
++ NOTES: Some code will work incorrectly in that case! */
++
++typedef long Int64;
++typedef unsigned long UInt64;
++
++#else
++
++#if defined(_MSC_VER) || defined(__BORLANDC__)
++typedef __int64 Int64;
++typedef unsigned __int64 UInt64;
++#else
++typedef long long int Int64;
++typedef unsigned long long int UInt64;
++#endif
++
++#endif
++
++#ifdef _LZMA_NO_SYSTEM_SIZE_T
++typedef UInt32 SizeT;
++#else
++typedef size_t SizeT;
++#endif
++
++typedef int Bool;
++#define True 1
++#define False 0
++
++
++#ifdef _WIN32
++#define MY_STD_CALL __stdcall
++#else
++#define MY_STD_CALL
++#endif
++
++#ifdef _MSC_VER
++
++#if _MSC_VER >= 1300
++#define MY_NO_INLINE __declspec(noinline)
++#else
++#define MY_NO_INLINE
++#endif
++
++#define MY_CDECL __cdecl
++#define MY_FAST_CALL __fastcall
++
++#else
++
++#define MY_CDECL
++#define MY_FAST_CALL
++
++#endif
++
++
++/* The following interfaces use first parameter as pointer to structure */
++
++typedef struct
++{
++ SRes (*Read)(void *p, void *buf, size_t *size);
++    /* A nonzero *size on input with a zero *size on output means
++       end_of_stream; (output(*size) < input(*size)) is allowed */
++} ISeqInStream;
++
++/* it can return SZ_ERROR_INPUT_EOF */
++SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size);
++SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType);
++SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf);
++
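++/* Implementation sketch (editor's illustration, not part of the SDK):
++   a memory-backed ISeqInStream. The interface struct is placed first
++   so the Read callback can recover its container from the "p"
++   pointer, which is how all these interfaces are meant to be used. */
++typedef struct
++{
++  ISeqInStream s;
++  const Byte *data;
++  size_t rem;
++} CExampleMemInStream;
++
++static SRes Example_MemRead(void *p, void *buf, size_t *size)
++{
++  CExampleMemInStream *m = (CExampleMemInStream *)p;
++  size_t i, cur = (*size < m->rem) ? *size : m->rem;
++  for (i = 0; i < cur; i++)
++    ((Byte *)buf)[i] = m->data[i];
++  m->data += cur;
++  m->rem -= cur;
++  *size = cur; /* 0 bytes out for a nonzero request signals end_of_stream */
++  return SZ_OK;
++}
++
++static void Example_MemInStream_Init(CExampleMemInStream *m,
++    const Byte *data, size_t size)
++{
++  m->s.Read = Example_MemRead;
++  m->data = data;
++  m->rem = size;
++}
++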
++typedef struct
++{
++ size_t (*Write)(void *p, const void *buf, size_t size);
++    /* Returns: the number of bytes actually written.
++       (result < size) means an error */
++} ISeqOutStream;
++
++typedef enum
++{
++ SZ_SEEK_SET = 0,
++ SZ_SEEK_CUR = 1,
++ SZ_SEEK_END = 2
++} ESzSeek;
++
++typedef struct
++{
++ SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */
++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
++} ISeekInStream;
++
++typedef struct
++{
++ SRes (*Look)(void *p, void **buf, size_t *size);
++    /* A nonzero *size on input with a zero *size on output means
++       end_of_stream; (output(*size) > input(*size)) is not allowed and
++       (output(*size) < input(*size)) is allowed */
++ SRes (*Skip)(void *p, size_t offset);
++ /* offset must be <= output(*size) of Look */
++
++ SRes (*Read)(void *p, void *buf, size_t *size);
++    /* reads directly (without a buffer). It's the same as ISeqInStream::Read */
++ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
++} ILookInStream;
++
++SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size);
++SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset);
++
++/* reads via ILookInStream::Read */
++SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType);
++SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size);
++
++#define LookToRead_BUF_SIZE (1 << 14)
++
++typedef struct
++{
++ ILookInStream s;
++ ISeekInStream *realStream;
++ size_t pos;
++ size_t size;
++ Byte buf[LookToRead_BUF_SIZE];
++} CLookToRead;
++
++void LookToRead_CreateVTable(CLookToRead *p, int lookahead);
++void LookToRead_Init(CLookToRead *p);
++
++typedef struct
++{
++ ISeqInStream s;
++ ILookInStream *realStream;
++} CSecToLook;
++
++void SecToLook_CreateVTable(CSecToLook *p);
++
++typedef struct
++{
++ ISeqInStream s;
++ ILookInStream *realStream;
++} CSecToRead;
++
++void SecToRead_CreateVTable(CSecToRead *p);
++
++typedef struct
++{
++ SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize);
++    /* Returns: result; (result != SZ_OK) requests a break.
++       A size value of (UInt64)(Int64)-1 means the value is unknown. */
++} ICompressProgress;
++
++typedef struct
++{
++ void *(*Alloc)(void *p, size_t size);
++ void (*Free)(void *p, void *address); /* address can be 0 */
++} ISzAlloc;
++
++#define IAlloc_Alloc(p, size) (p)->Alloc((p), size)
++#define IAlloc_Free(p, a) (p)->Free((p), a)
++
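++/* Implementation sketch (editor's illustration, not part of the SDK):
++   a trivial bump allocator behind the ISzAlloc interface, sized only
++   for small demos. Hosted builds would typically wrap malloc()/free(),
++   and kernel code kmalloc()/kfree(); Free being a no-op is acceptable
++   only for one-shot use. */
++static Byte g_ExampleArena[1 << 16];
++static size_t g_ExampleArenaUsed;
++
++static void *Example_Alloc(void *p, size_t size)
++{
++  Byte *r = g_ExampleArena + g_ExampleArenaUsed;
++  (void)p;
++  size = (size + 7) & ~(size_t)7; /* keep returned blocks aligned */
++  if (size > sizeof(g_ExampleArena) - g_ExampleArenaUsed)
++    return 0; /* callers treat a 0 return as SZ_ERROR_MEM */
++  g_ExampleArenaUsed += size;
++  return r;
++}
++
++static void Example_Free(void *p, void *address)
++{
++  (void)p;
++  (void)address; /* bump allocator: individual frees are dropped */
++}
++
++static ISzAlloc g_ExampleAlloc = { Example_Alloc, Example_Free };
++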
++EXTERN_C_END
++
++#endif
+--- a/include/uapi/linux/jffs2.h
++++ b/include/uapi/linux/jffs2.h
+@@ -46,6 +46,7 @@
+ #define JFFS2_COMPR_DYNRUBIN 0x05
+ #define JFFS2_COMPR_ZLIB 0x06
+ #define JFFS2_COMPR_LZO 0x07
++#define JFFS2_COMPR_LZMA 0x08
+ /* Compatibility flags. */
+ #define JFFS2_COMPAT_MASK 0xc000      /* What to do if an unknown nodetype is found */
+ #define JFFS2_NODE_ACCURATE 0x2000
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -316,6 +316,12 @@ config ZSTD_DECOMPRESS
+
+ source "lib/xz/Kconfig"
+
++config LZMA_COMPRESS
++ tristate
++
++config LZMA_DECOMPRESS
++ tristate
++
+ #
+ # These all provide a common interface (hence the apparent duplication with
+ # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -136,6 +136,16 @@ CFLAGS_kobject.o += -DDEBUG
+ CFLAGS_kobject_uevent.o += -DDEBUG
+ endif
+
++ifdef CONFIG_JFFS2_ZLIB
++ CONFIG_ZLIB_INFLATE:=y
++ CONFIG_ZLIB_DEFLATE:=y
++endif
++
++ifdef CONFIG_JFFS2_LZMA
++ CONFIG_LZMA_DECOMPRESS:=y
++ CONFIG_LZMA_COMPRESS:=y
++endif
++
+ obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
+ CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
+
+@@ -191,6 +201,8 @@ obj-$(CONFIG_ZSTD_COMPRESS) += zstd/
+ obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/
+ obj-$(CONFIG_XZ_DEC) += xz/
+ obj-$(CONFIG_RAID6_PQ) += raid6/
++obj-$(CONFIG_LZMA_COMPRESS) += lzma/
++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma/
+
+ lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
+ lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
+--- /dev/null
++++ b/lib/lzma/LzFind.c
+@@ -0,0 +1,761 @@
++/* LzFind.c -- Match finder for LZ algorithms
++2009-04-22 : Igor Pavlov : Public domain */
++
++#include <string.h>
++
++#include "LzFind.h"
++#include "LzHash.h"
++
++#define kEmptyHashValue 0
++#define kMaxValForNormalize ((UInt32)0xFFFFFFFF)
++#define kNormalizeStepMin (1 << 10) /* it must be a power of 2 */
++#define kNormalizeMask (~(kNormalizeStepMin - 1))
++#define kMaxHistorySize ((UInt32)3 << 30)
++
++#define kStartMaxLen 3
++
++static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc)
++{
++ if (!p->directInput)
++ {
++ alloc->Free(alloc, p->bufferBase);
++ p->bufferBase = 0;
++ }
++}
++
++/* (keepSizeBefore + keepSizeAfter + keepSizeReserv) must be < 4G */
++
++static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc)
++{
++ UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv;
++ if (p->directInput)
++ {
++ p->blockSize = blockSize;
++ return 1;
++ }
++ if (p->bufferBase == 0 || p->blockSize != blockSize)
++ {
++ LzInWindow_Free(p, alloc);
++ p->blockSize = blockSize;
++ p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize);
++ }
++ return (p->bufferBase != 0);
++}
++
++Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
++Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
++
++UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
++
++void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
++{
++ p->posLimit -= subValue;
++ p->pos -= subValue;
++ p->streamPos -= subValue;
++}
++
++static void MatchFinder_ReadBlock(CMatchFinder *p)
++{
++ if (p->streamEndWasReached || p->result != SZ_OK)
++ return;
++ if (p->directInput)
++ {
++ UInt32 curSize = 0xFFFFFFFF - p->streamPos;
++ if (curSize > p->directInputRem)
++ curSize = (UInt32)p->directInputRem;
++ p->directInputRem -= curSize;
++ p->streamPos += curSize;
++ if (p->directInputRem == 0)
++ p->streamEndWasReached = 1;
++ return;
++ }
++ for (;;)
++ {
++ Byte *dest = p->buffer + (p->streamPos - p->pos);
++ size_t size = (p->bufferBase + p->blockSize - dest);
++ if (size == 0)
++ return;
++ p->result = p->stream->Read(p->stream, dest, &size);
++ if (p->result != SZ_OK)
++ return;
++ if (size == 0)
++ {
++ p->streamEndWasReached = 1;
++ return;
++ }
++ p->streamPos += (UInt32)size;
++ if (p->streamPos - p->pos > p->keepSizeAfter)
++ return;
++ }
++}
++
++void MatchFinder_MoveBlock(CMatchFinder *p)
++{
++ memmove(p->bufferBase,
++ p->buffer - p->keepSizeBefore,
++ (size_t)(p->streamPos - p->pos + p->keepSizeBefore));
++ p->buffer = p->bufferBase + p->keepSizeBefore;
++}
++
++int MatchFinder_NeedMove(CMatchFinder *p)
++{
++ if (p->directInput)
++ return 0;
++ /* if (p->streamEndWasReached) return 0; */
++ return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
++}
++
++void MatchFinder_ReadIfRequired(CMatchFinder *p)
++{
++ if (p->streamEndWasReached)
++ return;
++ if (p->keepSizeAfter >= p->streamPos - p->pos)
++ MatchFinder_ReadBlock(p);
++}
++
++static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p)
++{
++ if (MatchFinder_NeedMove(p))
++ MatchFinder_MoveBlock(p);
++ MatchFinder_ReadBlock(p);
++}
++
++static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
++{
++ p->cutValue = 32;
++ p->btMode = 1;
++ p->numHashBytes = 4;
++ p->bigHash = 0;
++}
++
++#define kCrcPoly 0xEDB88320
++
++void MatchFinder_Construct(CMatchFinder *p)
++{
++ UInt32 i;
++ p->bufferBase = 0;
++ p->directInput = 0;
++ p->hash = 0;
++ MatchFinder_SetDefaultSettings(p);
++
++ for (i = 0; i < 256; i++)
++ {
++ UInt32 r = i;
++ int j;
++ for (j = 0; j < 8; j++)
++ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));
++ p->crc[i] = r;
++ }
++}
++
++static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->hash);
++ p->hash = 0;
++}
++
++void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc)
++{
++ MatchFinder_FreeThisClassMemory(p, alloc);
++ LzInWindow_Free(p, alloc);
++}
++
++static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc)
++{
++ size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
++ if (sizeInBytes / sizeof(CLzRef) != num)
++ return 0;
++ return (CLzRef *)alloc->Alloc(alloc, sizeInBytes);
++}
++
++int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
++ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
++ ISzAlloc *alloc)
++{
++ UInt32 sizeReserv;
++ if (historySize > kMaxHistorySize)
++ {
++ MatchFinder_Free(p, alloc);
++ return 0;
++ }
++ sizeReserv = historySize >> 1;
++ if (historySize > ((UInt32)2 << 30))
++ sizeReserv = historySize >> 2;
++ sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19);
++
++ p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
++ p->keepSizeAfter = matchMaxLen + keepAddBufferAfter;
++  /* we need one additional byte, since we use MoveBlock after pos++ and before using the dictionary */
++ if (LzInWindow_Create(p, sizeReserv, alloc))
++ {
++ UInt32 newCyclicBufferSize = historySize + 1;
++ UInt32 hs;
++ p->matchMaxLen = matchMaxLen;
++ {
++ p->fixedHashSize = 0;
++ if (p->numHashBytes == 2)
++ hs = (1 << 16) - 1;
++ else
++ {
++ hs = historySize - 1;
++ hs |= (hs >> 1);
++ hs |= (hs >> 2);
++ hs |= (hs >> 4);
++ hs |= (hs >> 8);
++ hs >>= 1;
++ hs |= 0xFFFF; /* don't change it! It's required for Deflate */
++ if (hs > (1 << 24))
++ {
++ if (p->numHashBytes == 3)
++ hs = (1 << 24) - 1;
++ else
++ hs >>= 1;
++ }
++ }
++ p->hashMask = hs;
++ hs++;
++ if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
++ if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
++ if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size;
++ hs += p->fixedHashSize;
++ }
++
++ {
++ UInt32 prevSize = p->hashSizeSum + p->numSons;
++ UInt32 newSize;
++ p->historySize = historySize;
++ p->hashSizeSum = hs;
++ p->cyclicBufferSize = newCyclicBufferSize;
++ p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize);
++ newSize = p->hashSizeSum + p->numSons;
++ if (p->hash != 0 && prevSize == newSize)
++ return 1;
++ MatchFinder_FreeThisClassMemory(p, alloc);
++ p->hash = AllocRefs(newSize, alloc);
++ if (p->hash != 0)
++ {
++ p->son = p->hash + p->hashSizeSum;
++ return 1;
++ }
++ }
++ }
++ MatchFinder_Free(p, alloc);
++ return 0;
++}
++
++static void MatchFinder_SetLimits(CMatchFinder *p)
++{
++ UInt32 limit = kMaxValForNormalize - p->pos;
++ UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos;
++ if (limit2 < limit)
++ limit = limit2;
++ limit2 = p->streamPos - p->pos;
++ if (limit2 <= p->keepSizeAfter)
++ {
++ if (limit2 > 0)
++ limit2 = 1;
++ }
++ else
++ limit2 -= p->keepSizeAfter;
++ if (limit2 < limit)
++ limit = limit2;
++ {
++ UInt32 lenLimit = p->streamPos - p->pos;
++ if (lenLimit > p->matchMaxLen)
++ lenLimit = p->matchMaxLen;
++ p->lenLimit = lenLimit;
++ }
++ p->posLimit = p->pos + limit;
++}
++
++void MatchFinder_Init(CMatchFinder *p)
++{
++ UInt32 i;
++ for (i = 0; i < p->hashSizeSum; i++)
++ p->hash[i] = kEmptyHashValue;
++ p->cyclicBufferPos = 0;
++ p->buffer = p->bufferBase;
++ p->pos = p->streamPos = p->cyclicBufferSize;
++ p->result = SZ_OK;
++ p->streamEndWasReached = 0;
++ MatchFinder_ReadBlock(p);
++ MatchFinder_SetLimits(p);
++}
++
++static UInt32 MatchFinder_GetSubValue(CMatchFinder *p)
++{
++ return (p->pos - p->historySize - 1) & kNormalizeMask;
++}
++
++void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
++{
++ UInt32 i;
++ for (i = 0; i < numItems; i++)
++ {
++ UInt32 value = items[i];
++ if (value <= subValue)
++ value = kEmptyHashValue;
++ else
++ value -= subValue;
++ items[i] = value;
++ }
++}
++
++static void MatchFinder_Normalize(CMatchFinder *p)
++{
++ UInt32 subValue = MatchFinder_GetSubValue(p);
++ MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons);
++ MatchFinder_ReduceOffsets(p, subValue);
++}
++
++static void MatchFinder_CheckLimits(CMatchFinder *p)
++{
++ if (p->pos == kMaxValForNormalize)
++ MatchFinder_Normalize(p);
++ if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos)
++ MatchFinder_CheckAndMoveAndRead(p);
++ if (p->cyclicBufferPos == p->cyclicBufferSize)
++ p->cyclicBufferPos = 0;
++ MatchFinder_SetLimits(p);
++}
++
++static UInt32 * Hc_GetMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
++ UInt32 *distances, UInt32 maxLen)
++{
++ son[_cyclicBufferPos] = curMatch;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ return distances;
++ {
++ const Byte *pb = cur - delta;
++ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
++ if (pb[maxLen] == cur[maxLen] && *pb == *cur)
++ {
++ UInt32 len = 0;
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ if (maxLen < len)
++ {
++ *distances++ = maxLen = len;
++ *distances++ = delta - 1;
++ if (len == lenLimit)
++ return distances;
++ }
++ }
++ }
++ }
++}
++
++UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
++ UInt32 *distances, UInt32 maxLen)
++{
++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
++ UInt32 len0 = 0, len1 = 0;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ {
++ *ptr0 = *ptr1 = kEmptyHashValue;
++ return distances;
++ }
++ {
++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
++ const Byte *pb = cur - delta;
++ UInt32 len = (len0 < len1 ? len0 : len1);
++ if (pb[len] == cur[len])
++ {
++ if (++len != lenLimit && pb[len] == cur[len])
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ if (maxLen < len)
++ {
++ *distances++ = maxLen = len;
++ *distances++ = delta - 1;
++ if (len == lenLimit)
++ {
++ *ptr1 = pair[0];
++ *ptr0 = pair[1];
++ return distances;
++ }
++ }
++ }
++ if (pb[len] < cur[len])
++ {
++ *ptr1 = curMatch;
++ ptr1 = pair + 1;
++ curMatch = *ptr1;
++ len1 = len;
++ }
++ else
++ {
++ *ptr0 = curMatch;
++ ptr0 = pair;
++ curMatch = *ptr0;
++ len0 = len;
++ }
++ }
++ }
++}
++
++static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
++ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
++{
++ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
++ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
++ UInt32 len0 = 0, len1 = 0;
++ for (;;)
++ {
++ UInt32 delta = pos - curMatch;
++ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
++ {
++ *ptr0 = *ptr1 = kEmptyHashValue;
++ return;
++ }
++ {
++ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
++ const Byte *pb = cur - delta;
++ UInt32 len = (len0 < len1 ? len0 : len1);
++ if (pb[len] == cur[len])
++ {
++ while (++len != lenLimit)
++ if (pb[len] != cur[len])
++ break;
++ {
++ if (len == lenLimit)
++ {
++ *ptr1 = pair[0];
++ *ptr0 = pair[1];
++ return;
++ }
++ }
++ }
++ if (pb[len] < cur[len])
++ {
++ *ptr1 = curMatch;
++ ptr1 = pair + 1;
++ curMatch = *ptr1;
++ len1 = len;
++ }
++ else
++ {
++ *ptr0 = curMatch;
++ ptr0 = pair;
++ curMatch = *ptr0;
++ len0 = len;
++ }
++ }
++ }
++}
++
++#define MOVE_POS \
++ ++p->cyclicBufferPos; \
++ p->buffer++; \
++ if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p);
++
++#define MOVE_POS_RET MOVE_POS return offset;
++
++static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; }
++
++#define GET_MATCHES_HEADER2(minLen, ret_op) \
++ UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \
++ lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
++ cur = p->buffer;
++
++#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0)
++#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue)
++
++#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
++
++#define GET_MATCHES_FOOTER(offset, maxLen) \
++ offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \
++ distances + offset, maxLen) - distances); MOVE_POS_RET;
++
++#define SKIP_FOOTER \
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS;
++
++static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(2)
++ HASH2_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = 0;
++ GET_MATCHES_FOOTER(offset, 1)
++}
++
++UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = 0;
++ GET_MATCHES_FOOTER(offset, 2)
++}
++
++static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, delta2, maxLen, offset;
++ GET_MATCHES_HEADER(3)
++
++ HASH3_CALC;
++
++ delta2 = p->pos - p->hash[hash2Value];
++ curMatch = p->hash[kFix3HashSize + hashValue];
++
++ p->hash[hash2Value] =
++ p->hash[kFix3HashSize + hashValue] = p->pos;
++
++
++ maxLen = 2;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[0] = maxLen;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ if (maxLen == lenLimit)
++ {
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
++ MOVE_POS_RET;
++ }
++ }
++ GET_MATCHES_FOOTER(offset, maxLen)
++}
++
++static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
++ GET_MATCHES_HEADER(4)
++
++ HASH4_CALC;
++
++ delta2 = p->pos - p->hash[ hash2Value];
++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
++ curMatch = p->hash[kFix4HashSize + hashValue];
++
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++
++ maxLen = 1;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ distances[0] = maxLen = 2;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ }
++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
++ {
++ maxLen = 3;
++ distances[offset + 1] = delta3 - 1;
++ offset += 2;
++ delta2 = delta3;
++ }
++ if (offset != 0)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[offset - 2] = maxLen;
++ if (maxLen == lenLimit)
++ {
++ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
++ MOVE_POS_RET;
++ }
++ }
++ if (maxLen < 3)
++ maxLen = 3;
++ GET_MATCHES_FOOTER(offset, maxLen)
++}
++
++static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
++ GET_MATCHES_HEADER(4)
++
++ HASH4_CALC;
++
++ delta2 = p->pos - p->hash[ hash2Value];
++ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
++ curMatch = p->hash[kFix4HashSize + hashValue];
++
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++
++ maxLen = 1;
++ offset = 0;
++ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
++ {
++ distances[0] = maxLen = 2;
++ distances[1] = delta2 - 1;
++ offset = 2;
++ }
++ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
++ {
++ maxLen = 3;
++ distances[offset + 1] = delta3 - 1;
++ offset += 2;
++ delta2 = delta3;
++ }
++ if (offset != 0)
++ {
++ for (; maxLen != lenLimit; maxLen++)
++ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
++ break;
++ distances[offset - 2] = maxLen;
++ if (maxLen == lenLimit)
++ {
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS_RET;
++ }
++ }
++ if (maxLen < 3)
++ maxLen = 3;
++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
++ distances + offset, maxLen) - (distances));
++ MOVE_POS_RET
++}
++
++UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
++{
++ UInt32 offset;
++ GET_MATCHES_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
++ distances, 2) - (distances));
++ MOVE_POS_RET
++}
++
++static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(2)
++ HASH2_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value;
++ SKIP_HEADER(3)
++ HASH3_CALC;
++ curMatch = p->hash[kFix3HashSize + hashValue];
++ p->hash[hash2Value] =
++ p->hash[kFix3HashSize + hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value, hash3Value;
++ SKIP_HEADER(4)
++ HASH4_CALC;
++ curMatch = p->hash[kFix4HashSize + hashValue];
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] = p->pos;
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++ SKIP_FOOTER
++ }
++ while (--num != 0);
++}
++
++static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ UInt32 hash2Value, hash3Value;
++ SKIP_HEADER(4)
++ HASH4_CALC;
++ curMatch = p->hash[kFix4HashSize + hashValue];
++ p->hash[ hash2Value] =
++ p->hash[kFix3HashSize + hash3Value] =
++ p->hash[kFix4HashSize + hashValue] = p->pos;
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS
++ }
++ while (--num != 0);
++}
++
++void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
++{
++ do
++ {
++ SKIP_HEADER(3)
++ HASH_ZIP_CALC;
++ curMatch = p->hash[hashValue];
++ p->hash[hashValue] = p->pos;
++ p->son[p->cyclicBufferPos] = curMatch;
++ MOVE_POS
++ }
++ while (--num != 0);
++}
++
++void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable)
++{
++ vTable->Init = (Mf_Init_Func)MatchFinder_Init;
++ vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte;
++ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
++ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
++ if (!p->btMode)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
++ }
++ else if (p->numHashBytes == 2)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
++ }
++ else if (p->numHashBytes == 3)
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
++ }
++ else
++ {
++ vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
++ vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
++ }
++}
+--- /dev/null
++++ b/lib/lzma/LzmaDec.c
+@@ -0,0 +1,999 @@
++/* LzmaDec.c -- LZMA Decoder
++2009-09-20 : Igor Pavlov : Public domain */
++
++#include "LzmaDec.h"
++
++#include <string.h>
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++
++#define RC_INIT_SIZE 5
++
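++/* Editor's note: the decoder below is a binary range coder. "range"
++   tracks the width of the current coding interval and "code" the
++   position of the compressed stream inside it; NORMALIZE shifts in a
++   fresh input byte whenever range drops below 2^24 (kTopValue), so
++   every probability-scaled comparison in IF_BIT_0 keeps at least 24
++   bits of precision. */
++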
++#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
++
++#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
++#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
++#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
++#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
++ { UPDATE_0(p); i = (i + i); A0; } else \
++ { UPDATE_1(p); i = (i + i) + 1; A1; }
++#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;)
++
++#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); }
++#define TREE_DECODE(probs, limit, i) \
++ { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
++
++/* #define _LZMA_SIZE_OPT */
++
++#ifdef _LZMA_SIZE_OPT
++#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
++#else
++#define TREE_6_DECODE(probs, i) \
++ { i = 1; \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ TREE_GET_BIT(probs, i); \
++ i -= 0x40; }
++#endif
++
++#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); }
++
++#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
++#define UPDATE_0_CHECK range = bound;
++#define UPDATE_1_CHECK range -= bound; code -= bound;
++#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
++ { UPDATE_0_CHECK; i = (i + i); A0; } else \
++ { UPDATE_1_CHECK; i = (i + i) + 1; A1; }
++#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
++#define TREE_DECODE_CHECK(probs, limit, i) \
++ { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
++
++
++#define kNumPosBitsMax 4
++#define kNumPosStatesMax (1 << kNumPosBitsMax)
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define LenChoice 0
++#define LenChoice2 (LenChoice + 1)
++#define LenLow (LenChoice2 + 1)
++#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
++#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
++#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
++
++
++#define kNumStates 12
++#define kNumLitStates 7
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#define kNumPosSlotBits 6
++#define kNumLenToPosStates 4
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++
++#define kMatchMinLen 2
++#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
++
++#define IsMatch 0
++#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
++#define IsRepG0 (IsRep + kNumStates)
++#define IsRepG1 (IsRepG0 + kNumStates)
++#define IsRepG2 (IsRepG1 + kNumStates)
++#define IsRep0Long (IsRepG2 + kNumStates)
++#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
++#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
++#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
++#define LenCoder (Align + kAlignTableSize)
++#define RepLenCoder (LenCoder + kNumLenProbs)
++#define Literal (RepLenCoder + kNumLenProbs)
++
++#define LZMA_BASE_SIZE 1846
++#define LZMA_LIT_SIZE 768
++
++#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
++
++#if Literal != LZMA_BASE_SIZE
++StopCompilingDueBUG
++#endif
++
++#define LZMA_DIC_MIN (1 << 12)
++
++/* The first LZMA symbol is always decoded.
++Then new LZMA symbols keep being decoded while (buf < bufLimit), but "buf" does not include the last normalization
++Out:
++ Result:
++ SZ_OK - OK
++ SZ_ERROR_DATA - Error
++ p->remainLen:
++ < kMatchSpecLenStart : normal remain
++ = kMatchSpecLenStart : finished
++ = kMatchSpecLenStart + 1 : Flush marker
++ = kMatchSpecLenStart + 2 : State Init Marker
++*/
++
++static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
++{
++ CLzmaProb *probs = p->probs;
++
++ unsigned state = p->state;
++ UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
++ unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
++ unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1;
++ unsigned lc = p->prop.lc;
++
++ Byte *dic = p->dic;
++ SizeT dicBufSize = p->dicBufSize;
++ SizeT dicPos = p->dicPos;
++
++ UInt32 processedPos = p->processedPos;
++ UInt32 checkDicSize = p->checkDicSize;
++ unsigned len = 0;
++
++ const Byte *buf = p->buf;
++ UInt32 range = p->range;
++ UInt32 code = p->code;
++
++ do
++ {
++ CLzmaProb *prob;
++ UInt32 bound;
++ unsigned ttt;
++ unsigned posState = processedPos & pbMask;
++
++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0(prob)
++ {
++ unsigned symbol;
++ UPDATE_0(prob);
++ prob = probs + Literal;
++ if (checkDicSize != 0 || processedPos != 0)
++ prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) +
++ (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc))));
++
++ if (state < kNumLitStates)
++ {
++ state -= (state < 4) ? state : 3;
++ symbol = 1;
++ do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100);
++ }
++ else
++ {
++ unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ unsigned offs = 0x100;
++ state -= (state < 10) ? 3 : 6;
++ symbol = 1;
++ do
++ {
++ unsigned bit;
++ CLzmaProb *probLit;
++ matchByte <<= 1;
++ bit = (matchByte & offs);
++ probLit = prob + offs + bit + symbol;
++ GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit)
++ }
++ while (symbol < 0x100);
++ }
++ dic[dicPos++] = (Byte)symbol;
++ processedPos++;
++ continue;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ prob = probs + IsRep + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ state += kNumStates;
++ prob = probs + LenCoder;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ if (checkDicSize == 0 && processedPos == 0)
++ return SZ_ERROR_DATA;
++ prob = probs + IsRepG0 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ dicPos++;
++ processedPos++;
++ state = state < kNumLitStates ? 9 : 11;
++ continue;
++ }
++ UPDATE_1(prob);
++ }
++ else
++ {
++ UInt32 distance;
++ UPDATE_1(prob);
++ prob = probs + IsRepG1 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ distance = rep1;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ prob = probs + IsRepG2 + state;
++ IF_BIT_0(prob)
++ {
++ UPDATE_0(prob);
++ distance = rep2;
++ }
++ else
++ {
++ UPDATE_1(prob);
++ distance = rep3;
++ rep3 = rep2;
++ }
++ rep2 = rep1;
++ }
++ rep1 = rep0;
++ rep0 = distance;
++ }
++ state = state < kNumLitStates ? 8 : 11;
++ prob = probs + RepLenCoder;
++ }
++ {
++ unsigned limit, offset;
++ CLzmaProb *probLen = prob + LenChoice;
++ IF_BIT_0(probLen)
++ {
++ UPDATE_0(probLen);
++ probLen = prob + LenLow + (posState << kLenNumLowBits);
++ offset = 0;
++ limit = (1 << kLenNumLowBits);
++ }
++ else
++ {
++ UPDATE_1(probLen);
++ probLen = prob + LenChoice2;
++ IF_BIT_0(probLen)
++ {
++ UPDATE_0(probLen);
++ probLen = prob + LenMid + (posState << kLenNumMidBits);
++ offset = kLenNumLowSymbols;
++ limit = (1 << kLenNumMidBits);
++ }
++ else
++ {
++ UPDATE_1(probLen);
++ probLen = prob + LenHigh;
++ offset = kLenNumLowSymbols + kLenNumMidSymbols;
++ limit = (1 << kLenNumHighBits);
++ }
++ }
++ TREE_DECODE(probLen, limit, len);
++ len += offset;
++ }
++
++ if (state >= kNumStates)
++ {
++ UInt32 distance;
++ prob = probs + PosSlot +
++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
++ TREE_6_DECODE(prob, distance);
++ if (distance >= kStartPosModelIndex)
++ {
++ unsigned posSlot = (unsigned)distance;
++ int numDirectBits = (int)(((distance >> 1) - 1));
++ distance = (2 | (distance & 1));
++ if (posSlot < kEndPosModelIndex)
++ {
++ distance <<= numDirectBits;
++ prob = probs + SpecPos + distance - posSlot - 1;
++ {
++ UInt32 mask = 1;
++ unsigned i = 1;
++ do
++ {
++ GET_BIT2(prob + i, i, ; , distance |= mask);
++ mask <<= 1;
++ }
++ while (--numDirectBits != 0);
++ }
++ }
++ else
++ {
++ numDirectBits -= kNumAlignBits;
++ do
++ {
++ NORMALIZE
++ range >>= 1;
++
++ {
++ UInt32 t;
++ code -= range;
++ t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
++ distance = (distance << 1) + (t + 1);
++ code += range & t;
++ }
++ /*
++ distance <<= 1;
++ if (code >= range)
++ {
++ code -= range;
++ distance |= 1;
++ }
++ */
++ }
++ while (--numDirectBits != 0);
++ prob = probs + Align;
++ distance <<= kNumAlignBits;
++ {
++ unsigned i = 1;
++ GET_BIT2(prob + i, i, ; , distance |= 1);
++ GET_BIT2(prob + i, i, ; , distance |= 2);
++ GET_BIT2(prob + i, i, ; , distance |= 4);
++ GET_BIT2(prob + i, i, ; , distance |= 8);
++ }
++ if (distance == (UInt32)0xFFFFFFFF)
++ {
++ len += kMatchSpecLenStart;
++ state -= kNumStates;
++ break;
++ }
++ }
++ }
++ rep3 = rep2;
++ rep2 = rep1;
++ rep1 = rep0;
++ rep0 = distance + 1;
++ if (checkDicSize == 0)
++ {
++ if (distance >= processedPos)
++ return SZ_ERROR_DATA;
++ }
++ else if (distance >= checkDicSize)
++ return SZ_ERROR_DATA;
++ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
++ }
++
++ len += kMatchMinLen;
++
++ if (limit == dicPos)
++ return SZ_ERROR_DATA;
++ {
++ SizeT rem = limit - dicPos;
++ unsigned curLen = ((rem < len) ? (unsigned)rem : len);
++ SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0);
++
++ processedPos += curLen;
++
++ len -= curLen;
++ if (pos + curLen <= dicBufSize)
++ {
++ Byte *dest = dic + dicPos;
++ ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
++ const Byte *lim = dest + curLen;
++ dicPos += curLen;
++ do
++ *(dest) = (Byte)*(dest + src);
++ while (++dest != lim);
++ }
++ else
++ {
++ do
++ {
++ dic[dicPos++] = dic[pos];
++ if (++pos == dicBufSize)
++ pos = 0;
++ }
++ while (--curLen != 0);
++ }
++ }
++ }
++ }
++ while (dicPos < limit && buf < bufLimit);
++ NORMALIZE;
++ p->buf = buf;
++ p->range = range;
++ p->code = code;
++ p->remainLen = len;
++ p->dicPos = dicPos;
++ p->processedPos = processedPos;
++ p->reps[0] = rep0;
++ p->reps[1] = rep1;
++ p->reps[2] = rep2;
++ p->reps[3] = rep3;
++ p->state = state;
++
++ return SZ_OK;
++}
++
++static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
++{
++ if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart)
++ {
++ Byte *dic = p->dic;
++ SizeT dicPos = p->dicPos;
++ SizeT dicBufSize = p->dicBufSize;
++ unsigned len = p->remainLen;
++ UInt32 rep0 = p->reps[0];
++ if (limit - dicPos < len)
++ len = (unsigned)(limit - dicPos);
++
++ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
++ p->checkDicSize = p->prop.dicSize;
++
++ p->processedPos += len;
++ p->remainLen -= len;
++ while (len-- != 0)
++ {
++ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
++ dicPos++;
++ }
++ p->dicPos = dicPos;
++ }
++}
++
++static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
++{
++ do
++ {
++ SizeT limit2 = limit;
++ if (p->checkDicSize == 0)
++ {
++ UInt32 rem = p->prop.dicSize - p->processedPos;
++ if (limit - p->dicPos > rem)
++ limit2 = p->dicPos + rem;
++ }
++ RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit));
++ if (p->processedPos >= p->prop.dicSize)
++ p->checkDicSize = p->prop.dicSize;
++ LzmaDec_WriteRem(p, limit);
++ }
++ while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart);
++
++ if (p->remainLen > kMatchSpecLenStart)
++ {
++ p->remainLen = kMatchSpecLenStart;
++ }
++ return 0;
++}
++
++typedef enum
++{
++ DUMMY_ERROR, /* unexpected end of input stream */
++ DUMMY_LIT,
++ DUMMY_MATCH,
++ DUMMY_REP
++} ELzmaDummy;
++
++static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize)
++{
++ UInt32 range = p->range;
++ UInt32 code = p->code;
++ const Byte *bufLimit = buf + inSize;
++ CLzmaProb *probs = p->probs;
++ unsigned state = p->state;
++ ELzmaDummy res;
++
++ {
++ CLzmaProb *prob;
++ UInt32 bound;
++ unsigned ttt;
++ unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1);
++
++ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK
++
++ /* if (bufLimit - buf >= 7) return DUMMY_LIT; */
++
++ prob = probs + Literal;
++ if (p->checkDicSize != 0 || p->processedPos != 0)
++ prob += (LZMA_LIT_SIZE *
++ ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) +
++ (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
++
++ if (state < kNumLitStates)
++ {
++ unsigned symbol = 1;
++ do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
++ }
++ else
++ {
++ unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
++ ((p->dicPos < p->reps[0]) ? p->dicBufSize : 0)];
++ unsigned offs = 0x100;
++ unsigned symbol = 1;
++ do
++ {
++ unsigned bit;
++ CLzmaProb *probLit;
++ matchByte <<= 1;
++ bit = (matchByte & offs);
++ probLit = prob + offs + bit + symbol;
++ GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit)
++ }
++ while (symbol < 0x100);
++ }
++ res = DUMMY_LIT;
++ }
++ else
++ {
++ unsigned len;
++ UPDATE_1_CHECK;
++
++ prob = probs + IsRep + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ state = 0;
++ prob = probs + LenCoder;
++ res = DUMMY_MATCH;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ res = DUMMY_REP;
++ prob = probs + IsRepG0 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ NORMALIZE_CHECK;
++ return DUMMY_REP;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ }
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ prob = probs + IsRepG1 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ prob = probs + IsRepG2 + state;
++ IF_BIT_0_CHECK(prob)
++ {
++ UPDATE_0_CHECK;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ }
++ }
++ }
++ state = kNumStates;
++ prob = probs + RepLenCoder;
++ }
++ {
++ unsigned limit, offset;
++ CLzmaProb *probLen = prob + LenChoice;
++ IF_BIT_0_CHECK(probLen)
++ {
++ UPDATE_0_CHECK;
++ probLen = prob + LenLow + (posState << kLenNumLowBits);
++ offset = 0;
++ limit = 1 << kLenNumLowBits;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ probLen = prob + LenChoice2;
++ IF_BIT_0_CHECK(probLen)
++ {
++ UPDATE_0_CHECK;
++ probLen = prob + LenMid + (posState << kLenNumMidBits);
++ offset = kLenNumLowSymbols;
++ limit = 1 << kLenNumMidBits;
++ }
++ else
++ {
++ UPDATE_1_CHECK;
++ probLen = prob + LenHigh;
++ offset = kLenNumLowSymbols + kLenNumMidSymbols;
++ limit = 1 << kLenNumHighBits;
++ }
++ }
++ TREE_DECODE_CHECK(probLen, limit, len);
++ len += offset;
++ }
++
++ if (state < 4)
++ {
++ unsigned posSlot;
++ prob = probs + PosSlot +
++ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
++ kNumPosSlotBits);
++ TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);
++ if (posSlot >= kStartPosModelIndex)
++ {
++ int numDirectBits = ((posSlot >> 1) - 1);
++
++ /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */
++
++ if (posSlot < kEndPosModelIndex)
++ {
++ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1;
++ }
++ else
++ {
++ numDirectBits -= kNumAlignBits;
++ do
++ {
++ NORMALIZE_CHECK
++ range >>= 1;
++ code -= range & (((code - range) >> 31) - 1);
++ /* if (code >= range) code -= range; */
++ }
++ while (--numDirectBits != 0);
++ prob = probs + Align;
++ numDirectBits = kNumAlignBits;
++ }
++ {
++ unsigned i = 1;
++ do
++ {
++ GET_BIT_CHECK(prob + i, i);
++ }
++ while (--numDirectBits != 0);
++ }
++ }
++ }
++ }
++ }
++ NORMALIZE_CHECK;
++ return res;
++}
++
++
++static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data)
++{
++ p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]);
++ p->range = 0xFFFFFFFF;
++ p->needFlush = 0;
++}
++
++void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
++{
++ p->needFlush = 1;
++ p->remainLen = 0;
++ p->tempBufSize = 0;
++
++ if (initDic)
++ {
++ p->processedPos = 0;
++ p->checkDicSize = 0;
++ p->needInitState = 1;
++ }
++ if (initState)
++ p->needInitState = 1;
++}
++
++void LzmaDec_Init(CLzmaDec *p)
++{
++ p->dicPos = 0;
++ LzmaDec_InitDicAndState(p, True, True);
++}
++
++static void LzmaDec_InitStateReal(CLzmaDec *p)
++{
++ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp));
++ UInt32 i;
++ CLzmaProb *probs = p->probs;
++ for (i = 0; i < numProbs; i++)
++ probs[i] = kBitModelTotal >> 1;
++ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
++ p->state = 0;
++ p->needInitState = 0;
++}
++
++SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
++ ELzmaFinishMode finishMode, ELzmaStatus *status)
++{
++ SizeT inSize = *srcLen;
++ (*srcLen) = 0;
++ LzmaDec_WriteRem(p, dicLimit);
++
++ *status = LZMA_STATUS_NOT_SPECIFIED;
++
++ while (p->remainLen != kMatchSpecLenStart)
++ {
++ int checkEndMarkNow;
++
++ if (p->needFlush != 0)
++ {
++ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
++ p->tempBuf[p->tempBufSize++] = *src++;
++ if (p->tempBufSize < RC_INIT_SIZE)
++ {
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (p->tempBuf[0] != 0)
++ return SZ_ERROR_DATA;
++
++ LzmaDec_InitRc(p, p->tempBuf);
++ p->tempBufSize = 0;
++ }
++
++ checkEndMarkNow = 0;
++ if (p->dicPos >= dicLimit)
++ {
++ if (p->remainLen == 0 && p->code == 0)
++ {
++ *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
++ return SZ_OK;
++ }
++ if (finishMode == LZMA_FINISH_ANY)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_OK;
++ }
++ if (p->remainLen != 0)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ checkEndMarkNow = 1;
++ }
++
++ if (p->needInitState)
++ LzmaDec_InitStateReal(p);
++
++ if (p->tempBufSize == 0)
++ {
++ SizeT processed;
++ const Byte *bufLimit;
++ if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
++ {
++ int dummyRes = LzmaDec_TryDummy(p, src, inSize);
++ if (dummyRes == DUMMY_ERROR)
++ {
++ memcpy(p->tempBuf, src, inSize);
++ p->tempBufSize = (unsigned)inSize;
++ (*srcLen) += inSize;
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ bufLimit = src;
++ }
++ else
++ bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
++ p->buf = src;
++ if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0)
++ return SZ_ERROR_DATA;
++ processed = (SizeT)(p->buf - src);
++ (*srcLen) += processed;
++ src += processed;
++ inSize -= processed;
++ }
++ else
++ {
++ unsigned rem = p->tempBufSize, lookAhead = 0;
++ while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize)
++ p->tempBuf[rem++] = src[lookAhead++];
++ p->tempBufSize = rem;
++ if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
++ {
++ int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem);
++ if (dummyRes == DUMMY_ERROR)
++ {
++ (*srcLen) += lookAhead;
++ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
++ return SZ_OK;
++ }
++ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
++ {
++ *status = LZMA_STATUS_NOT_FINISHED;
++ return SZ_ERROR_DATA;
++ }
++ }
++ p->buf = p->tempBuf;
++ if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0)
++ return SZ_ERROR_DATA;
++ lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf));
++ (*srcLen) += lookAhead;
++ src += lookAhead;
++ inSize -= lookAhead;
++ p->tempBufSize = 0;
++ }
++ }
++ if (p->code == 0)
++ *status = LZMA_STATUS_FINISHED_WITH_MARK;
++ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;
++}
++
++SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
++{
++ SizeT outSize = *destLen;
++ SizeT inSize = *srcLen;
++ *srcLen = *destLen = 0;
++ for (;;)
++ {
++ SizeT inSizeCur = inSize, outSizeCur, dicPos;
++ ELzmaFinishMode curFinishMode;
++ SRes res;
++ if (p->dicPos == p->dicBufSize)
++ p->dicPos = 0;
++ dicPos = p->dicPos;
++ if (outSize > p->dicBufSize - dicPos)
++ {
++ outSizeCur = p->dicBufSize;
++ curFinishMode = LZMA_FINISH_ANY;
++ }
++ else
++ {
++ outSizeCur = dicPos + outSize;
++ curFinishMode = finishMode;
++ }
++
++ res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status);
++ src += inSizeCur;
++ inSize -= inSizeCur;
++ *srcLen += inSizeCur;
++ outSizeCur = p->dicPos - dicPos;
++ memcpy(dest, p->dic + dicPos, outSizeCur);
++ dest += outSizeCur;
++ outSize -= outSizeCur;
++ *destLen += outSizeCur;
++ if (res != 0)
++ return res;
++ if (outSizeCur == 0 || outSize == 0)
++ return SZ_OK;
++ }
++}
++
++void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->probs);
++ p->probs = 0;
++}
++
++static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->dic);
++ p->dic = 0;
++}
++
++void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
++{
++ LzmaDec_FreeProbs(p, alloc);
++ LzmaDec_FreeDict(p, alloc);
++}
++
++SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
++{
++ UInt32 dicSize;
++ Byte d;
++
++ if (size < LZMA_PROPS_SIZE)
++ return SZ_ERROR_UNSUPPORTED;
++ else
++ dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
++
++ if (dicSize < LZMA_DIC_MIN)
++ dicSize = LZMA_DIC_MIN;
++ p->dicSize = dicSize;
++
++ d = data[0];
++ if (d >= (9 * 5 * 5))
++ return SZ_ERROR_UNSUPPORTED;
++
++ p->lc = d % 9;
++ d /= 9;
++ p->pb = d / 5;
++ p->lp = d % 5;
++
++ return SZ_OK;
++}
++
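++/* Worked example (editor's note): the common properties byte 0x5D
++   (93) decodes as lc = 93 % 9 = 3, then d = 93 / 9 = 10, giving
++   pb = 10 / 5 = 2 and lp = 10 % 5 = 0, i.e. the classic lc=3, lp=0,
++   pb=2 encoder defaults. */
++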
++static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc)
++{
++ UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
++ if (p->probs == 0 || numProbs != p->numProbs)
++ {
++ LzmaDec_FreeProbs(p, alloc);
++ p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb));
++ p->numProbs = numProbs;
++ if (p->probs == 0)
++ return SZ_ERROR_MEM;
++ }
++ return SZ_OK;
++}
++
++SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++{
++ CLzmaProps propNew;
++ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
++ p->prop = propNew;
++ return SZ_OK;
++}
++
++SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
++{
++ CLzmaProps propNew;
++ SizeT dicBufSize;
++ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
++ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
++ dicBufSize = propNew.dicSize;
++ if (p->dic == 0 || dicBufSize != p->dicBufSize)
++ {
++ LzmaDec_FreeDict(p, alloc);
++ p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize);
++ if (p->dic == 0)
++ {
++ LzmaDec_FreeProbs(p, alloc);
++ return SZ_ERROR_MEM;
++ }
++ }
++ p->dicBufSize = dicBufSize;
++ p->prop = propNew;
++ return SZ_OK;
++}
++
++SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
++ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
++ ELzmaStatus *status, ISzAlloc *alloc)
++{
++ CLzmaDec p;
++ SRes res;
++ SizeT inSize = *srcLen;
++ SizeT outSize = *destLen;
++ *srcLen = *destLen = 0;
++ if (inSize < RC_INIT_SIZE)
++ return SZ_ERROR_INPUT_EOF;
++
++ LzmaDec_Construct(&p);
++ res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc);
++ if (res != 0)
++ return res;
++ p.dic = dest;
++ p.dicBufSize = outSize;
++
++ LzmaDec_Init(&p);
++
++ *srcLen = inSize;
++ res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
++
++ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
++ res = SZ_ERROR_INPUT_EOF;
++
++ (*destLen) = p.dicPos;
++ LzmaDec_FreeProbs(&p, alloc);
++ return res;
++}
+--- /dev/null
++++ b/lib/lzma/LzmaEnc.c
+@@ -0,0 +1,2271 @@
++/* LzmaEnc.c -- LZMA Encoder
++2009-11-24 : Igor Pavlov : Public domain */
++
++#include <string.h>
++
++/* #define SHOW_STAT */
++/* #define SHOW_STAT2 */
++
++#if defined(SHOW_STAT) || defined(SHOW_STAT2)
++#include <stdio.h>
++#endif
++
++#include "LzmaEnc.h"
++
++/* disable MT */
++#define _7ZIP_ST
++
++#include "LzFind.h"
++#ifndef _7ZIP_ST
++#include "LzFindMt.h"
++#endif
++
++#ifdef SHOW_STAT
++static int ttt = 0;
++#endif
++
++#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1)
++
++#define kBlockSize (9 << 10)
++#define kUnpackBlockSize (1 << 18)
++#define kMatchArraySize (1 << 21)
++#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX)
++
++#define kNumMaxDirectBits (31)
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++#define kProbInitValue (kBitModelTotal >> 1)
++
++#define kNumMoveReducingBits 4
++#define kNumBitPriceShiftBits 4
++#define kBitPrice (1 << kNumBitPriceShiftBits)
++
++void LzmaEncProps_Init(CLzmaEncProps *p)
++{
++ p->level = 5;
++ p->dictSize = p->mc = 0;
++ p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
++ p->writeEndMark = 0;
++}
++
++void LzmaEncProps_Normalize(CLzmaEncProps *p)
++{
++ int level = p->level;
++ if (level < 0) level = 5;
++ p->level = level;
++ if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26)));
++ if (p->lc < 0) p->lc = 3;
++ if (p->lp < 0) p->lp = 0;
++ if (p->pb < 0) p->pb = 2;
++ if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
++ if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
++ if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
++ if (p->numHashBytes < 0) p->numHashBytes = 4;
++ if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1);
++ if (p->numThreads < 0)
++ p->numThreads =
++ #ifndef _7ZIP_ST
++ ((p->btMode && p->algo) ? 2 : 1);
++ #else
++ 1;
++ #endif
++}
++
++UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
++{
++ CLzmaEncProps props = *props2;
++ LzmaEncProps_Normalize(&props);
++ return props.dictSize;
++}
++
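++/* Worked example (editor's note): for the default level 5 the rules
++   above give dictSize = 1 << (5 * 2 + 14) = 16 MiB, algo = 1 (normal),
++   fb = 32, btMode = 1 and numHashBytes = 4, which is exactly the Bt4
++   match finder selected in MatchFinder_CreateVTable(). */
++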
++/* #define LZMA_LOG_BSR */
++/* Define it for Intel's CPU */
++
++
++#ifdef LZMA_LOG_BSR
++
++#define kDicLogSizeMaxCompress 30
++
++#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); }
++
++UInt32 GetPosSlot1(UInt32 pos)
++{
++ UInt32 res;
++ BSR2_RET(pos, res);
++ return res;
++}
++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
++#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
++
++#else
++
++#define kNumLogBits (9 + (int)sizeof(size_t) / 2)
++#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
++
++void LzmaEnc_FastPosInit(Byte *g_FastPos)
++{
++ int c = 2, slotFast;
++ g_FastPos[0] = 0;
++ g_FastPos[1] = 1;
++
++ for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++)
++ {
++ UInt32 k = (1 << ((slotFast >> 1) - 1));
++ UInt32 j;
++ for (j = 0; j < k; j++, c++)
++ g_FastPos[c] = (Byte)slotFast;
++ }
++}
++
++#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \
++ (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
++ res = p->g_FastPos[pos >> i] + (i * 2); }
++/*
++#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
++ p->g_FastPos[pos >> 6] + 12 : \
++ p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
++*/
++
++#define GetPosSlot1(pos) p->g_FastPos[pos]
++#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
++#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); }
++
++#endif
++
++
++#define LZMA_NUM_REPS 4
++
++typedef unsigned CState;
++
++typedef struct
++{
++ UInt32 price;
++
++ CState state;
++ int prev1IsChar;
++ int prev2;
++
++ UInt32 posPrev2;
++ UInt32 backPrev2;
++
++ UInt32 posPrev;
++ UInt32 backPrev;
++ UInt32 backs[LZMA_NUM_REPS];
++} COptimal;
++
++#define kNumOpts (1 << 12)
++
++#define kNumLenToPosStates 4
++#define kNumPosSlotBits 6
++#define kDicLogSizeMin 0
++#define kDicLogSizeMax 32
++#define kDistTableSizeMax (kDicLogSizeMax * 2)
++
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++#define kAlignMask (kAlignTableSize - 1)
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex)
++
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#ifdef _LZMA_PROB32
++#define CLzmaProb UInt32
++#else
++#define CLzmaProb UInt16
++#endif
++
++#define LZMA_PB_MAX 4
++#define LZMA_LC_MAX 8
++#define LZMA_LP_MAX 4
++
++#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
++
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
++
++#define LZMA_MATCH_LEN_MIN 2
++#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
++
++#define kNumStates 12
++
++typedef struct
++{
++ CLzmaProb choice;
++ CLzmaProb choice2;
++ CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits];
++ CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits];
++ CLzmaProb high[kLenNumHighSymbols];
++} CLenEnc;
++
++typedef struct
++{
++ CLenEnc p;
++ UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
++ UInt32 tableSize;
++ UInt32 counters[LZMA_NUM_PB_STATES_MAX];
++} CLenPriceEnc;
++
++typedef struct
++{
++ UInt32 range;
++ Byte cache;
++ UInt64 low;
++ UInt64 cacheSize;
++ Byte *buf;
++ Byte *bufLim;
++ Byte *bufBase;
++ ISeqOutStream *outStream;
++ UInt64 processed;
++ SRes res;
++} CRangeEnc;
++
++typedef struct
++{
++ CLzmaProb *litProbs;
++
++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
++ CLzmaProb isRep[kNumStates];
++ CLzmaProb isRepG0[kNumStates];
++ CLzmaProb isRepG1[kNumStates];
++ CLzmaProb isRepG2[kNumStates];
++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
++
++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
++ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
++
++ CLenPriceEnc lenEnc;
++ CLenPriceEnc repLenEnc;
++
++ UInt32 reps[LZMA_NUM_REPS];
++ UInt32 state;
++} CSaveState;
++
++typedef struct
++{
++ IMatchFinder matchFinder;
++ void *matchFinderObj;
++
++ #ifndef _7ZIP_ST
++ Bool mtMode;
++ CMatchFinderMt matchFinderMt;
++ #endif
++
++ CMatchFinder matchFinderBase;
++
++ #ifndef _7ZIP_ST
++ Byte pad[128];
++ #endif
++
++ UInt32 optimumEndIndex;
++ UInt32 optimumCurrentIndex;
++
++ UInt32 longestMatchLength;
++ UInt32 numPairs;
++ UInt32 numAvail;
++ COptimal opt[kNumOpts];
++
++ #ifndef LZMA_LOG_BSR
++ Byte g_FastPos[1 << kNumLogBits];
++ #endif
++
++ UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
++ UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1];
++ UInt32 numFastBytes;
++ UInt32 additionalOffset;
++ UInt32 reps[LZMA_NUM_REPS];
++ UInt32 state;
++
++ UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
++ UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
++ UInt32 alignPrices[kAlignTableSize];
++ UInt32 alignPriceCount;
++
++ UInt32 distTableSize;
++
++ unsigned lc, lp, pb;
++ unsigned lpMask, pbMask;
++
++ CLzmaProb *litProbs;
++
++ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
++ CLzmaProb isRep[kNumStates];
++ CLzmaProb isRepG0[kNumStates];
++ CLzmaProb isRepG1[kNumStates];
++ CLzmaProb isRepG2[kNumStates];
++ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
++
++ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
++ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
++ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
++
++ CLenPriceEnc lenEnc;
++ CLenPriceEnc repLenEnc;
++
++ unsigned lclp;
++
++ Bool fastMode;
++
++ CRangeEnc rc;
++
++ Bool writeEndMark;
++ UInt64 nowPos64;
++ UInt32 matchPriceCount;
++ Bool finished;
++ Bool multiThread;
++
++ SRes result;
++ UInt32 dictSize;
++ UInt32 matchFinderCycles;
++
++ int needInit;
++
++ CSaveState saveState;
++} CLzmaEnc;
++
++void LzmaEnc_SaveState(CLzmaEncHandle pp)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ CSaveState *dest = &p->saveState;
++ int i;
++ dest->lenEnc = p->lenEnc;
++ dest->repLenEnc = p->repLenEnc;
++ dest->state = p->state;
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
++ }
++ for (i = 0; i < kNumLenToPosStates; i++)
++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
++ memcpy(dest->reps, p->reps, sizeof(p->reps));
++ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb));
++}
++
++void LzmaEnc_RestoreState(CLzmaEncHandle pp)
++{
++ CLzmaEnc *dest = (CLzmaEnc *)pp;
++ const CSaveState *p = &dest->saveState;
++ int i;
++ dest->lenEnc = p->lenEnc;
++ dest->repLenEnc = p->repLenEnc;
++ dest->state = p->state;
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
++ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
++ }
++ for (i = 0; i < kNumLenToPosStates; i++)
++ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
++ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
++ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
++ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
++ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
++ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
++ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
++ memcpy(dest->reps, p->reps, sizeof(p->reps));
++ memcpy(dest->litProbs, p->litProbs, (0x300 << dest->lclp) * sizeof(CLzmaProb));
++}
++
++SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ CLzmaEncProps props = *props2;
++ LzmaEncProps_Normalize(&props);
++
++ if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX ||
++ props.dictSize > (1 << kDicLogSizeMaxCompress) || props.dictSize > (1 << 30))
++ return SZ_ERROR_PARAM;
++ p->dictSize = props.dictSize;
++ p->matchFinderCycles = props.mc;
++ {
++ unsigned fb = props.fb;
++ if (fb < 5)
++ fb = 5;
++ if (fb > LZMA_MATCH_LEN_MAX)
++ fb = LZMA_MATCH_LEN_MAX;
++ p->numFastBytes = fb;
++ }
++ p->lc = props.lc;
++ p->lp = props.lp;
++ p->pb = props.pb;
++ p->fastMode = (props.algo == 0);
++ p->matchFinderBase.btMode = props.btMode;
++ {
++ UInt32 numHashBytes = 4;
++ if (props.btMode)
++ {
++ if (props.numHashBytes < 2)
++ numHashBytes = 2;
++ else if (props.numHashBytes < 4)
++ numHashBytes = props.numHashBytes;
++ }
++ p->matchFinderBase.numHashBytes = numHashBytes;
++ }
++
++ p->matchFinderBase.cutValue = props.mc;
++
++ p->writeEndMark = props.writeEndMark;
++
++ #ifndef _7ZIP_ST
++ /*
++ if (newMultiThread != _multiThread)
++ {
++ ReleaseMatchFinder();
++ _multiThread = newMultiThread;
++ }
++ */
++ p->multiThread = (props.numThreads > 1);
++ #endif
++
++ return SZ_OK;
++}
++
++static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
++static const int kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
++static const int kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
++static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
++
++#define IsCharState(s) ((s) < 7)
++
++#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
++
++#define kInfinityPrice (1 << 30)
++
++static void RangeEnc_Construct(CRangeEnc *p)
++{
++ p->outStream = 0;
++ p->bufBase = 0;
++}
++
++#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize)
++
++#define RC_BUF_SIZE (1 << 16)
++static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc)
++{
++ if (p->bufBase == 0)
++ {
++ p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE);
++ if (p->bufBase == 0)
++ return 0;
++ p->bufLim = p->bufBase + RC_BUF_SIZE;
++ }
++ return 1;
++}
++
++static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->bufBase);
++ p->bufBase = 0;
++}
++
++static void RangeEnc_Init(CRangeEnc *p)
++{
++ /* Stream.Init(); */
++ p->low = 0;
++ p->range = 0xFFFFFFFF;
++ p->cacheSize = 1;
++ p->cache = 0;
++
++ p->buf = p->bufBase;
++
++ p->processed = 0;
++ p->res = SZ_OK;
++}
++
++static void RangeEnc_FlushStream(CRangeEnc *p)
++{
++ size_t num;
++ if (p->res != SZ_OK)
++ return;
++ num = p->buf - p->bufBase;
++ if (num != p->outStream->Write(p->outStream, p->bufBase, num))
++ p->res = SZ_ERROR_WRITE;
++ p->processed += num;
++ p->buf = p->bufBase;
++}
++
++static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
++{
++ if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0)
++ {
++ Byte temp = p->cache;
++ do
++ {
++ Byte *buf = p->buf;
++ *buf++ = (Byte)(temp + (Byte)(p->low >> 32));
++ p->buf = buf;
++ if (buf == p->bufLim)
++ RangeEnc_FlushStream(p);
++ temp = 0xFF;
++ }
++ while (--p->cacheSize != 0);
++ p->cache = (Byte)((UInt32)p->low >> 24);
++ }
++ p->cacheSize++;
++ p->low = (UInt32)p->low << 8;
++}
++
++static void RangeEnc_FlushData(CRangeEnc *p)
++{
++ int i;
++ for (i = 0; i < 5; i++)
++ RangeEnc_ShiftLow(p);
++}
++
++static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits)
++{
++ do
++ {
++ p->range >>= 1;
++ p->low += p->range & (0 - ((value >> --numBits) & 1));
++ if (p->range < kTopValue)
++ {
++ p->range <<= 8;
++ RangeEnc_ShiftLow(p);
++ }
++ }
++ while (numBits != 0);
++}
++
++static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol)
++{
++ UInt32 ttt = *prob;
++ UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt;
++ if (symbol == 0)
++ {
++ p->range = newBound;
++ ttt += (kBitModelTotal - ttt) >> kNumMoveBits;
++ }
++ else
++ {
++ p->low += newBound;
++ p->range -= newBound;
++ ttt -= ttt >> kNumMoveBits;
++ }
++ *prob = (CLzmaProb)ttt;
++ if (p->range < kTopValue)
++ {
++ p->range <<= 8;
++ RangeEnc_ShiftLow(p);
++ }
++}
++
++static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol)
++{
++ symbol |= 0x100;
++ do
++ {
++ RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1);
++ symbol <<= 1;
++ }
++ while (symbol < 0x10000);
++}
++
++static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte)
++{
++ UInt32 offs = 0x100;
++ symbol |= 0x100;
++ do
++ {
++ matchByte <<= 1;
++ RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1);
++ symbol <<= 1;
++ offs &= ~(matchByte ^ symbol);
++ }
++ while (symbol < 0x10000);
++}
++
++void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
++{
++ UInt32 i;
++ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits))
++ {
++ const int kCyclesBits = kNumBitPriceShiftBits;
++ UInt32 w = i;
++ UInt32 bitCount = 0;
++ int j;
++ for (j = 0; j < kCyclesBits; j++)
++ {
++ w = w * w;
++ bitCount <<= 1;
++ while (w >= ((UInt32)1 << 16))
++ {
++ w >>= 1;
++ bitCount++;
++ }
++ }
++ ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
++ }
++}
++
++
++#define GET_PRICE(prob, symbol) \
++ p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
++
++#define GET_PRICEa(prob, symbol) \
++ ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
++
++#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
++#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
++
++#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
++#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
++
++static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ symbol |= 0x100;
++ do
++ {
++ price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1);
++ symbol <<= 1;
++ }
++ while (symbol < 0x10000);
++ return price;
++}
++
++static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ UInt32 offs = 0x100;
++ symbol |= 0x100;
++ do
++ {
++ matchByte <<= 1;
++ price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1);
++ symbol <<= 1;
++ offs &= ~(matchByte ^ symbol);
++ }
++ while (symbol < 0x10000);
++ return price;
++}
++
++
++static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
++{
++ UInt32 m = 1;
++ int i;
++ for (i = numBitLevels; i != 0;)
++ {
++ UInt32 bit;
++ i--;
++ bit = (symbol >> i) & 1;
++ RangeEnc_EncodeBit(rc, probs + m, bit);
++ m = (m << 1) | bit;
++ }
++}
++
++static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
++{
++ UInt32 m = 1;
++ int i;
++ for (i = 0; i < numBitLevels; i++)
++ {
++ UInt32 bit = symbol & 1;
++ RangeEnc_EncodeBit(rc, probs + m, bit);
++ m = (m << 1) | bit;
++ symbol >>= 1;
++ }
++}
++
++static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ symbol |= (1 << numBitLevels);
++ while (symbol != 1)
++ {
++ price += GET_PRICEa(probs[symbol >> 1], symbol & 1);
++ symbol >>= 1;
++ }
++ return price;
++}
++
++static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
++{
++ UInt32 price = 0;
++ UInt32 m = 1;
++ int i;
++ for (i = numBitLevels; i != 0; i--)
++ {
++ UInt32 bit = symbol & 1;
++ symbol >>= 1;
++ price += GET_PRICEa(probs[m], bit);
++ m = (m << 1) | bit;
++ }
++ return price;
++}
++
++
++static void LenEnc_Init(CLenEnc *p)
++{
++ unsigned i;
++ p->choice = p->choice2 = kProbInitValue;
++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++)
++ p->low[i] = kProbInitValue;
++ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++)
++ p->mid[i] = kProbInitValue;
++ for (i = 0; i < kLenNumHighSymbols; i++)
++ p->high[i] = kProbInitValue;
++}
++
++static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState)
++{
++ if (symbol < kLenNumLowSymbols)
++ {
++ RangeEnc_EncodeBit(rc, &p->choice, 0);
++ RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol);
++ }
++ else
++ {
++ RangeEnc_EncodeBit(rc, &p->choice, 1);
++ if (symbol < kLenNumLowSymbols + kLenNumMidSymbols)
++ {
++ RangeEnc_EncodeBit(rc, &p->choice2, 0);
++ RcTree_Encode(rc, p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols);
++ }
++ else
++ {
++ RangeEnc_EncodeBit(rc, &p->choice2, 1);
++ RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols);
++ }
++ }
++}
++
++static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices)
++{
++ UInt32 a0 = GET_PRICE_0a(p->choice);
++ UInt32 a1 = GET_PRICE_1a(p->choice);
++ UInt32 b0 = a1 + GET_PRICE_0a(p->choice2);
++ UInt32 b1 = a1 + GET_PRICE_1a(p->choice2);
++ UInt32 i = 0;
++ for (i = 0; i < kLenNumLowSymbols; i++)
++ {
++ if (i >= numSymbols)
++ return;
++ prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices);
++ }
++ for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++)
++ {
++ if (i >= numSymbols)
++ return;
++ prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices);
++ }
++ for (; i < numSymbols; i++)
++ prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices);
++}
++
++static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices)
++{
++ LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices);
++ p->counters[posState] = p->tableSize;
++}
++
++static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices)
++{
++ UInt32 posState;
++ for (posState = 0; posState < numPosStates; posState++)
++ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
++}
++
++static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices)
++{
++ LenEnc_Encode(&p->p, rc, symbol, posState);
++ if (updatePrice)
++ if (--p->counters[posState] == 0)
++ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
++}
++
++
++
++
++static void MovePos(CLzmaEnc *p, UInt32 num)
++{
++ #ifdef SHOW_STAT
++ ttt += num;
++ printf("\n MovePos %d", num);
++ #endif
++ if (num != 0)
++ {
++ p->additionalOffset += num;
++ p->matchFinder.Skip(p->matchFinderObj, num);
++ }
++}
++
++static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes)
++{
++ UInt32 lenRes = 0, numPairs;
++ p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
++ numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
++ #ifdef SHOW_STAT
++ printf("\n i = %d numPairs = %d ", ttt, numPairs / 2);
++ ttt++;
++ {
++ UInt32 i;
++ for (i = 0; i < numPairs; i += 2)
++ printf("%2d %6d | ", p->matches[i], p->matches[i + 1]);
++ }
++ #endif
++ if (numPairs > 0)
++ {
++ lenRes = p->matches[numPairs - 2];
++ if (lenRes == p->numFastBytes)
++ {
++ const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ UInt32 distance = p->matches[numPairs - 1] + 1;
++ UInt32 numAvail = p->numAvail;
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++ {
++ const Byte *pby2 = pby - distance;
++ for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++);
++ }
++ }
++ }
++ p->additionalOffset++;
++ *numDistancePairsRes = numPairs;
++ return lenRes;
++}
++
++
++#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False;
++#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False;
++#define IsShortRep(p) ((p)->backPrev == 0)
++
++static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState)
++{
++ return
++ GET_PRICE_0(p->isRepG0[state]) +
++ GET_PRICE_0(p->isRep0Long[state][posState]);
++}
++
++static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState)
++{
++ UInt32 price;
++ if (repIndex == 0)
++ {
++ price = GET_PRICE_0(p->isRepG0[state]);
++ price += GET_PRICE_1(p->isRep0Long[state][posState]);
++ }
++ else
++ {
++ price = GET_PRICE_1(p->isRepG0[state]);
++ if (repIndex == 1)
++ price += GET_PRICE_0(p->isRepG1[state]);
++ else
++ {
++ price += GET_PRICE_1(p->isRepG1[state]);
++ price += GET_PRICE(p->isRepG2[state], repIndex - 2);
++ }
++ }
++ return price;
++}
++
++static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState)
++{
++ return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] +
++ GetPureRepPrice(p, repIndex, state, posState);
++}
++
++static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur)
++{
++ UInt32 posMem = p->opt[cur].posPrev;
++ UInt32 backMem = p->opt[cur].backPrev;
++ p->optimumEndIndex = cur;
++ do
++ {
++ if (p->opt[cur].prev1IsChar)
++ {
++ MakeAsChar(&p->opt[posMem])
++ p->opt[posMem].posPrev = posMem - 1;
++ if (p->opt[cur].prev2)
++ {
++ p->opt[posMem - 1].prev1IsChar = False;
++ p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2;
++ p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2;
++ }
++ }
++ {
++ UInt32 posPrev = posMem;
++ UInt32 backCur = backMem;
++
++ backMem = p->opt[posPrev].backPrev;
++ posMem = p->opt[posPrev].posPrev;
++
++ p->opt[posPrev].backPrev = backCur;
++ p->opt[posPrev].posPrev = cur;
++ cur = posPrev;
++ }
++ }
++ while (cur != 0);
++ *backRes = p->opt[0].backPrev;
++ p->optimumCurrentIndex = p->opt[0].posPrev;
++ return p->optimumCurrentIndex;
++}
++
++#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300)
++
++static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes)
++{
++ UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur;
++ UInt32 matchPrice, repMatchPrice, normalMatchPrice;
++ UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS];
++ UInt32 *matches;
++ const Byte *data;
++ Byte curByte, matchByte;
++ if (p->optimumEndIndex != p->optimumCurrentIndex)
++ {
++ const COptimal *opt = &p->opt[p->optimumCurrentIndex];
++ UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex;
++ *backRes = opt->backPrev;
++ p->optimumCurrentIndex = opt->posPrev;
++ return lenRes;
++ }
++ p->optimumCurrentIndex = p->optimumEndIndex = 0;
++
++ if (p->additionalOffset == 0)
++ mainLen = ReadMatchDistances(p, &numPairs);
++ else
++ {
++ mainLen = p->longestMatchLength;
++ numPairs = p->numPairs;
++ }
++
++ numAvail = p->numAvail;
++ if (numAvail < 2)
++ {
++ *backRes = (UInt32)(-1);
++ return 1;
++ }
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ repMaxIndex = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 lenTest;
++ const Byte *data2;
++ reps[i] = p->reps[i];
++ data2 = data - (reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ {
++ repLens[i] = 0;
++ continue;
++ }
++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
++ repLens[i] = lenTest;
++ if (lenTest > repLens[repMaxIndex])
++ repMaxIndex = i;
++ }
++ if (repLens[repMaxIndex] >= p->numFastBytes)
++ {
++ UInt32 lenRes;
++ *backRes = repMaxIndex;
++ lenRes = repLens[repMaxIndex];
++ MovePos(p, lenRes - 1);
++ return lenRes;
++ }
++
++ matches = p->matches;
++ if (mainLen >= p->numFastBytes)
++ {
++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 1);
++ return mainLen;
++ }
++ curByte = *data;
++ matchByte = *(data - (reps[0] + 1));
++
++ if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2)
++ {
++ *backRes = (UInt32)-1;
++ return 1;
++ }
++
++ p->opt[0].state = (CState)p->state;
++
++ posState = (position & p->pbMask);
++
++ {
++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
++ p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
++ (!IsCharState(p->state) ?
++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
++ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
++ }
++
++ MakeAsChar(&p->opt[1]);
++
++ matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
++
++ if (matchByte == curByte)
++ {
++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState);
++ if (shortRepPrice < p->opt[1].price)
++ {
++ p->opt[1].price = shortRepPrice;
++ MakeAsShortRep(&p->opt[1]);
++ }
++ }
++ lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]);
++
++ if (lenEnd < 2)
++ {
++ *backRes = p->opt[1].backPrev;
++ return 1;
++ }
++
++ p->opt[1].posPrev = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ p->opt[0].backs[i] = reps[i];
++
++ len = lenEnd;
++ do
++ p->opt[len--].price = kInfinityPrice;
++ while (len >= 2);
++
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 repLen = repLens[i];
++ UInt32 price;
++ if (repLen < 2)
++ continue;
++ price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState);
++ do
++ {
++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2];
++ COptimal *opt = &p->opt[repLen];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = 0;
++ opt->backPrev = i;
++ opt->prev1IsChar = False;
++ }
++ }
++ while (--repLen >= 2);
++ }
++
++ normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
++
++ len = ((repLens[0] >= 2) ? repLens[0] + 1 : 2);
++ if (len <= mainLen)
++ {
++ UInt32 offs = 0;
++ while (len > matches[offs])
++ offs += 2;
++ for (; ; len++)
++ {
++ COptimal *opt;
++ UInt32 distance = matches[offs + 1];
++
++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN];
++ UInt32 lenToPosState = GetLenToPosState(len);
++ if (distance < kNumFullDistances)
++ curAndLenPrice += p->distancesPrices[lenToPosState][distance];
++ else
++ {
++ UInt32 slot;
++ GetPosSlot2(distance, slot);
++ curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot];
++ }
++ opt = &p->opt[len];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = 0;
++ opt->backPrev = distance + LZMA_NUM_REPS;
++ opt->prev1IsChar = False;
++ }
++ if (len == matches[offs])
++ {
++ offs += 2;
++ if (offs == numPairs)
++ break;
++ }
++ }
++ }
++
++ cur = 0;
++
++ #ifdef SHOW_STAT2
++ if (position >= 0)
++ {
++ unsigned i;
++ printf("\n pos = %4X", position);
++ for (i = cur; i <= lenEnd; i++)
++ printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price);
++ }
++ #endif
++
++ for (;;)
++ {
++ UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen;
++ UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice;
++ Bool nextIsChar;
++ Byte curByte, matchByte;
++ const Byte *data;
++ COptimal *curOpt;
++ COptimal *nextOpt;
++
++ cur++;
++ if (cur == lenEnd)
++ return Backward(p, backRes, cur);
++
++ newLen = ReadMatchDistances(p, &numPairs);
++ if (newLen >= p->numFastBytes)
++ {
++ p->numPairs = numPairs;
++ p->longestMatchLength = newLen;
++ return Backward(p, backRes, cur);
++ }
++ position++;
++ curOpt = &p->opt[cur];
++ posPrev = curOpt->posPrev;
++ if (curOpt->prev1IsChar)
++ {
++ posPrev--;
++ if (curOpt->prev2)
++ {
++ state = p->opt[curOpt->posPrev2].state;
++ if (curOpt->backPrev2 < LZMA_NUM_REPS)
++ state = kRepNextStates[state];
++ else
++ state = kMatchNextStates[state];
++ }
++ else
++ state = p->opt[posPrev].state;
++ state = kLiteralNextStates[state];
++ }
++ else
++ state = p->opt[posPrev].state;
++ if (posPrev == cur - 1)
++ {
++ if (IsShortRep(curOpt))
++ state = kShortRepNextStates[state];
++ else
++ state = kLiteralNextStates[state];
++ }
++ else
++ {
++ UInt32 pos;
++ const COptimal *prevOpt;
++ if (curOpt->prev1IsChar && curOpt->prev2)
++ {
++ posPrev = curOpt->posPrev2;
++ pos = curOpt->backPrev2;
++ state = kRepNextStates[state];
++ }
++ else
++ {
++ pos = curOpt->backPrev;
++ if (pos < LZMA_NUM_REPS)
++ state = kRepNextStates[state];
++ else
++ state = kMatchNextStates[state];
++ }
++ prevOpt = &p->opt[posPrev];
++ if (pos < LZMA_NUM_REPS)
++ {
++ UInt32 i;
++ reps[0] = prevOpt->backs[pos];
++ for (i = 1; i <= pos; i++)
++ reps[i] = prevOpt->backs[i - 1];
++ for (; i < LZMA_NUM_REPS; i++)
++ reps[i] = prevOpt->backs[i];
++ }
++ else
++ {
++ UInt32 i;
++ reps[0] = (pos - LZMA_NUM_REPS);
++ for (i = 1; i < LZMA_NUM_REPS; i++)
++ reps[i] = prevOpt->backs[i - 1];
++ }
++ }
++ curOpt->state = (CState)state;
++
++ curOpt->backs[0] = reps[0];
++ curOpt->backs[1] = reps[1];
++ curOpt->backs[2] = reps[2];
++ curOpt->backs[3] = reps[3];
++
++ curPrice = curOpt->price;
++ nextIsChar = False;
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ curByte = *data;
++ matchByte = *(data - (reps[0] + 1));
++
++ posState = (position & p->pbMask);
++
++ curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]);
++ {
++ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
++ curAnd1Price +=
++ (!IsCharState(state) ?
++ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
++ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
++ }
++
++ nextOpt = &p->opt[cur + 1];
++
++ if (curAnd1Price < nextOpt->price)
++ {
++ nextOpt->price = curAnd1Price;
++ nextOpt->posPrev = cur;
++ MakeAsChar(nextOpt);
++ nextIsChar = True;
++ }
++
++ matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]);
++ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
++
++ if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0))
++ {
++ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState);
++ if (shortRepPrice <= nextOpt->price)
++ {
++ nextOpt->price = shortRepPrice;
++ nextOpt->posPrev = cur;
++ MakeAsShortRep(nextOpt);
++ nextIsChar = True;
++ }
++ }
++ numAvailFull = p->numAvail;
++ {
++ UInt32 temp = kNumOpts - 1 - cur;
++ if (temp < numAvailFull)
++ numAvailFull = temp;
++ }
++
++ if (numAvailFull < 2)
++ continue;
++ numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
++
++ if (!nextIsChar && matchByte != curByte) /* speed optimization */
++ {
++ /* try Literal + rep0 */
++ UInt32 temp;
++ UInt32 lenTest2;
++ const Byte *data2 = data - (reps[0] + 1);
++ UInt32 limit = p->numFastBytes + 1;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++
++ for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++);
++ lenTest2 = temp - 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kLiteralNextStates[state];
++ UInt32 posStateNext = (position + 1) & p->pbMask;
++ UInt32 nextRepMatchPrice = curAnd1Price +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ UInt32 offset = cur + 1 + lenTest2;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = False;
++ }
++ }
++ }
++ }
++
++ startLen = 2; /* speed optimization */
++ {
++ UInt32 repIndex;
++ for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++)
++ {
++ UInt32 lenTest;
++ UInt32 lenTestTemp;
++ UInt32 price;
++ const Byte *data2 = data - (reps[repIndex] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
++ while (lenEnd < cur + lenTest)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ lenTestTemp = lenTest;
++ price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState);
++ do
++ {
++ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2];
++ COptimal *opt = &p->opt[cur + lenTest];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur;
++ opt->backPrev = repIndex;
++ opt->prev1IsChar = False;
++ }
++ }
++ while (--lenTest >= 2);
++ lenTest = lenTestTemp;
++
++ if (repIndex == 0)
++ startLen = lenTest + 1;
++
++ /* if (_maxMode) */
++ {
++ UInt32 lenTest2 = lenTest + 1;
++ UInt32 limit = lenTest2 + p->numFastBytes;
++ UInt32 nextRepMatchPrice;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
++ lenTest2 -= lenTest + 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kRepNextStates[state];
++ UInt32 posStateNext = (position + lenTest) & p->pbMask;
++ UInt32 curAndLenCharPrice =
++ price + p->repLenEnc.prices[posState][lenTest - 2] +
++ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
++ data[lenTest], data2[lenTest], p->ProbPrices);
++ state2 = kLiteralNextStates[state2];
++ posStateNext = (position + lenTest + 1) & p->pbMask;
++ nextRepMatchPrice = curAndLenCharPrice +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ UInt32 offset = cur + lenTest + 1 + lenTest2;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + lenTest + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = True;
++ opt->posPrev2 = cur;
++ opt->backPrev2 = repIndex;
++ }
++ }
++ }
++ }
++ }
++ }
++ /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */
++ if (newLen > numAvail)
++ {
++ newLen = numAvail;
++ for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2);
++ matches[numPairs] = newLen;
++ numPairs += 2;
++ }
++ if (newLen >= startLen)
++ {
++ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
++ UInt32 offs, curBack, posSlot;
++ UInt32 lenTest;
++ while (lenEnd < cur + newLen)
++ p->opt[++lenEnd].price = kInfinityPrice;
++
++ offs = 0;
++ while (startLen > matches[offs])
++ offs += 2;
++ curBack = matches[offs + 1];
++ GetPosSlot2(curBack, posSlot);
++ for (lenTest = /*2*/ startLen; ; lenTest++)
++ {
++ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN];
++ UInt32 lenToPosState = GetLenToPosState(lenTest);
++ COptimal *opt;
++ if (curBack < kNumFullDistances)
++ curAndLenPrice += p->distancesPrices[lenToPosState][curBack];
++ else
++ curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask];
++
++ opt = &p->opt[cur + lenTest];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur;
++ opt->backPrev = curBack + LZMA_NUM_REPS;
++ opt->prev1IsChar = False;
++ }
++
++ if (/*_maxMode && */lenTest == matches[offs])
++ {
++ /* Try Match + Literal + Rep0 */
++ const Byte *data2 = data - (curBack + 1);
++ UInt32 lenTest2 = lenTest + 1;
++ UInt32 limit = lenTest2 + p->numFastBytes;
++ UInt32 nextRepMatchPrice;
++ if (limit > numAvailFull)
++ limit = numAvailFull;
++ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
++ lenTest2 -= lenTest + 1;
++ if (lenTest2 >= 2)
++ {
++ UInt32 state2 = kMatchNextStates[state];
++ UInt32 posStateNext = (position + lenTest) & p->pbMask;
++ UInt32 curAndLenCharPrice = curAndLenPrice +
++ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
++ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
++ data[lenTest], data2[lenTest], p->ProbPrices);
++ state2 = kLiteralNextStates[state2];
++ posStateNext = (posStateNext + 1) & p->pbMask;
++ nextRepMatchPrice = curAndLenCharPrice +
++ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
++ GET_PRICE_1(p->isRep[state2]);
++
++ /* for (; lenTest2 >= 2; lenTest2--) */
++ {
++ UInt32 offset = cur + lenTest + 1 + lenTest2;
++ UInt32 curAndLenPrice;
++ COptimal *opt;
++ while (lenEnd < offset)
++ p->opt[++lenEnd].price = kInfinityPrice;
++ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
++ opt = &p->opt[offset];
++ if (curAndLenPrice < opt->price)
++ {
++ opt->price = curAndLenPrice;
++ opt->posPrev = cur + lenTest + 1;
++ opt->backPrev = 0;
++ opt->prev1IsChar = True;
++ opt->prev2 = True;
++ opt->posPrev2 = cur;
++ opt->backPrev2 = curBack + LZMA_NUM_REPS;
++ }
++ }
++ }
++ offs += 2;
++ if (offs == numPairs)
++ break;
++ curBack = matches[offs + 1];
++ if (curBack >= kNumFullDistances)
++ GetPosSlot2(curBack, posSlot);
++ }
++ }
++ }
++ }
++}
++
++#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
++
++static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes)
++{
++ UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i;
++ const Byte *data;
++ const UInt32 *matches;
++
++ if (p->additionalOffset == 0)
++ mainLen = ReadMatchDistances(p, &numPairs);
++ else
++ {
++ mainLen = p->longestMatchLength;
++ numPairs = p->numPairs;
++ }
++
++ numAvail = p->numAvail;
++ *backRes = (UInt32)-1;
++ if (numAvail < 2)
++ return 1;
++ if (numAvail > LZMA_MATCH_LEN_MAX)
++ numAvail = LZMA_MATCH_LEN_MAX;
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++
++ repLen = repIndex = 0;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 len;
++ const Byte *data2 = data - (p->reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ for (len = 2; len < numAvail && data[len] == data2[len]; len++);
++ if (len >= p->numFastBytes)
++ {
++ *backRes = i;
++ MovePos(p, len - 1);
++ return len;
++ }
++ if (len > repLen)
++ {
++ repIndex = i;
++ repLen = len;
++ }
++ }
++
++ matches = p->matches;
++ if (mainLen >= p->numFastBytes)
++ {
++ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 1);
++ return mainLen;
++ }
++
++  mainDist = 0; /* for GCC: avoid a maybe-uninitialized warning */
++ if (mainLen >= 2)
++ {
++ mainDist = matches[numPairs - 1];
++ while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1)
++ {
++ if (!ChangePair(matches[numPairs - 3], mainDist))
++ break;
++ numPairs -= 2;
++ mainLen = matches[numPairs - 2];
++ mainDist = matches[numPairs - 1];
++ }
++ if (mainLen == 2 && mainDist >= 0x80)
++ mainLen = 1;
++ }
++
++ if (repLen >= 2 && (
++ (repLen + 1 >= mainLen) ||
++ (repLen + 2 >= mainLen && mainDist >= (1 << 9)) ||
++ (repLen + 3 >= mainLen && mainDist >= (1 << 15))))
++ {
++ *backRes = repIndex;
++ MovePos(p, repLen - 1);
++ return repLen;
++ }
++
++ if (mainLen < 2 || numAvail <= 2)
++ return 1;
++
++ p->longestMatchLength = ReadMatchDistances(p, &p->numPairs);
++ if (p->longestMatchLength >= 2)
++ {
++ UInt32 newDistance = matches[p->numPairs - 1];
++ if ((p->longestMatchLength >= mainLen && newDistance < mainDist) ||
++ (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) ||
++ (p->longestMatchLength > mainLen + 1) ||
++ (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist)))
++ return 1;
++ }
++
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
++ for (i = 0; i < LZMA_NUM_REPS; i++)
++ {
++ UInt32 len, limit;
++ const Byte *data2 = data - (p->reps[i] + 1);
++ if (data[0] != data2[0] || data[1] != data2[1])
++ continue;
++ limit = mainLen - 1;
++ for (len = 2; len < limit && data[len] == data2[len]; len++);
++ if (len >= limit)
++ return 1;
++ }
++ *backRes = mainDist + LZMA_NUM_REPS;
++ MovePos(p, mainLen - 2);
++ return mainLen;
++}
++
++static void WriteEndMarker(CLzmaEnc *p, UInt32 posState)
++{
++ UInt32 len;
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
++ p->state = kMatchNextStates[p->state];
++ len = LZMA_MATCH_LEN_MIN;
++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1);
++ RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits);
++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
++}
++
++static SRes CheckErrors(CLzmaEnc *p)
++{
++ if (p->result != SZ_OK)
++ return p->result;
++ if (p->rc.res != SZ_OK)
++ p->result = SZ_ERROR_WRITE;
++ if (p->matchFinderBase.result != SZ_OK)
++ p->result = SZ_ERROR_READ;
++ if (p->result != SZ_OK)
++ p->finished = True;
++ return p->result;
++}
++
++static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
++{
++ /* ReleaseMFStream(); */
++ p->finished = True;
++ if (p->writeEndMark)
++ WriteEndMarker(p, nowPos & p->pbMask);
++ RangeEnc_FlushData(&p->rc);
++ RangeEnc_FlushStream(&p->rc);
++ return CheckErrors(p);
++}
++
++static void FillAlignPrices(CLzmaEnc *p)
++{
++ UInt32 i;
++ for (i = 0; i < kAlignTableSize; i++)
++ p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
++ p->alignPriceCount = 0;
++}
++
++static void FillDistancesPrices(CLzmaEnc *p)
++{
++ UInt32 tempPrices[kNumFullDistances];
++ UInt32 i, lenToPosState;
++ for (i = kStartPosModelIndex; i < kNumFullDistances; i++)
++ {
++ UInt32 posSlot = GetPosSlot1(i);
++ UInt32 footerBits = ((posSlot >> 1) - 1);
++ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
++ tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices);
++ }
++
++ for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++)
++ {
++ UInt32 posSlot;
++ const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState];
++ UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState];
++ for (posSlot = 0; posSlot < p->distTableSize; posSlot++)
++ posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices);
++ for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++)
++ posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
++
++ {
++ UInt32 *distancesPrices = p->distancesPrices[lenToPosState];
++ UInt32 i;
++ for (i = 0; i < kStartPosModelIndex; i++)
++ distancesPrices[i] = posSlotPrices[i];
++ for (; i < kNumFullDistances; i++)
++ distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i];
++ }
++ }
++ p->matchPriceCount = 0;
++}
++
++void LzmaEnc_Construct(CLzmaEnc *p)
++{
++ RangeEnc_Construct(&p->rc);
++ MatchFinder_Construct(&p->matchFinderBase);
++ #ifndef _7ZIP_ST
++ MatchFinderMt_Construct(&p->matchFinderMt);
++ p->matchFinderMt.MatchFinder = &p->matchFinderBase;
++ #endif
++
++ {
++ CLzmaEncProps props;
++ LzmaEncProps_Init(&props);
++ LzmaEnc_SetProps(p, &props);
++ }
++
++ #ifndef LZMA_LOG_BSR
++ LzmaEnc_FastPosInit(p->g_FastPos);
++ #endif
++
++ LzmaEnc_InitPriceTables(p->ProbPrices);
++ p->litProbs = 0;
++ p->saveState.litProbs = 0;
++}
++
++CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc)
++{
++ void *p;
++ p = alloc->Alloc(alloc, sizeof(CLzmaEnc));
++ if (p != 0)
++ LzmaEnc_Construct((CLzmaEnc *)p);
++ return p;
++}
++
++void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
++{
++ alloc->Free(alloc, p->litProbs);
++ alloc->Free(alloc, p->saveState.litProbs);
++ p->litProbs = 0;
++ p->saveState.litProbs = 0;
++}
++
++void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ #ifndef _7ZIP_ST
++ MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
++ #endif
++ MatchFinder_Free(&p->matchFinderBase, allocBig);
++ LzmaEnc_FreeLits(p, alloc);
++ RangeEnc_Free(&p->rc, alloc);
++}
++
++void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig);
++ alloc->Free(alloc, p);
++}
++
++static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize)
++{
++ UInt32 nowPos32, startPos32;
++ if (p->needInit)
++ {
++ p->matchFinder.Init(p->matchFinderObj);
++ p->needInit = 0;
++ }
++
++ if (p->finished)
++ return p->result;
++ RINOK(CheckErrors(p));
++
++ nowPos32 = (UInt32)p->nowPos64;
++ startPos32 = nowPos32;
++
++ if (p->nowPos64 == 0)
++ {
++ UInt32 numPairs;
++ Byte curByte;
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
++ return Flush(p, nowPos32);
++ ReadMatchDistances(p, &numPairs);
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0);
++ p->state = kLiteralNextStates[p->state];
++ curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset);
++ LitEnc_Encode(&p->rc, p->litProbs, curByte);
++ p->additionalOffset--;
++ nowPos32++;
++ }
++
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
++ for (;;)
++ {
++ UInt32 pos, len, posState;
++
++ if (p->fastMode)
++ len = GetOptimumFast(p, &pos);
++ else
++ len = GetOptimum(p, nowPos32, &pos);
++
++ #ifdef SHOW_STAT2
++ printf("\n pos = %4X, len = %d pos = %d", nowPos32, len, pos);
++ #endif
++
++ posState = nowPos32 & p->pbMask;
++ if (len == 1 && pos == (UInt32)-1)
++ {
++ Byte curByte;
++ CLzmaProb *probs;
++ const Byte *data;
++
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0);
++ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
++ curByte = *data;
++ probs = LIT_PROBS(nowPos32, *(data - 1));
++ if (IsCharState(p->state))
++ LitEnc_Encode(&p->rc, probs, curByte);
++ else
++ LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1));
++ p->state = kLiteralNextStates[p->state];
++ }
++ else
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
++ if (pos < LZMA_NUM_REPS)
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1);
++ if (pos == 0)
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0);
++ RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 0 : 1));
++ }
++ else
++ {
++ UInt32 distance = p->reps[pos];
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1);
++ if (pos == 1)
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0);
++ else
++ {
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1);
++ RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2);
++ if (pos == 3)
++ p->reps[3] = p->reps[2];
++ p->reps[2] = p->reps[1];
++ }
++ p->reps[1] = p->reps[0];
++ p->reps[0] = distance;
++ }
++ if (len == 1)
++ p->state = kShortRepNextStates[p->state];
++ else
++ {
++ LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ p->state = kRepNextStates[p->state];
++ }
++ }
++ else
++ {
++ UInt32 posSlot;
++ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
++ p->state = kMatchNextStates[p->state];
++ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
++ pos -= LZMA_NUM_REPS;
++ GetPosSlot(pos, posSlot);
++ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot);
++
++ if (posSlot >= kStartPosModelIndex)
++ {
++ UInt32 footerBits = ((posSlot >> 1) - 1);
++ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
++ UInt32 posReduced = pos - base;
++
++ if (posSlot < kEndPosModelIndex)
++ RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced);
++ else
++ {
++ RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
++ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
++ p->alignPriceCount++;
++ }
++ }
++ p->reps[3] = p->reps[2];
++ p->reps[2] = p->reps[1];
++ p->reps[1] = p->reps[0];
++ p->reps[0] = pos;
++ p->matchPriceCount++;
++ }
++ }
++ p->additionalOffset -= len;
++ nowPos32 += len;
++ if (p->additionalOffset == 0)
++ {
++ UInt32 processed;
++ if (!p->fastMode)
++ {
++ if (p->matchPriceCount >= (1 << 7))
++ FillDistancesPrices(p);
++ if (p->alignPriceCount >= kAlignTableSize)
++ FillAlignPrices(p);
++ }
++ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
++ break;
++ processed = nowPos32 - startPos32;
++ if (useLimits)
++ {
++ if (processed + kNumOpts + 300 >= maxUnpackSize ||
++ RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize)
++ break;
++ }
++ else if (processed >= (1 << 15))
++ {
++ p->nowPos64 += nowPos32 - startPos32;
++ return CheckErrors(p);
++ }
++ }
++ }
++ p->nowPos64 += nowPos32 - startPos32;
++ return Flush(p, nowPos32);
++}
++
++#define kBigHashDicLimit ((UInt32)1 << 24)
++
++static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ UInt32 beforeSize = kNumOpts;
++ Bool btMode;
++ if (!RangeEnc_Alloc(&p->rc, alloc))
++ return SZ_ERROR_MEM;
++ btMode = (p->matchFinderBase.btMode != 0);
++ #ifndef _7ZIP_ST
++ p->mtMode = (p->multiThread && !p->fastMode && btMode);
++ #endif
++
++ {
++ unsigned lclp = p->lc + p->lp;
++ if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp)
++ {
++ LzmaEnc_FreeLits(p, alloc);
++ p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
++ p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
++ if (p->litProbs == 0 || p->saveState.litProbs == 0)
++ {
++ LzmaEnc_FreeLits(p, alloc);
++ return SZ_ERROR_MEM;
++ }
++ p->lclp = lclp;
++ }
++ }
++
++ p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit);
++
++ if (beforeSize + p->dictSize < keepWindowSize)
++ beforeSize = keepWindowSize - p->dictSize;
++
++ #ifndef _7ZIP_ST
++ if (p->mtMode)
++ {
++ RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig));
++ p->matchFinderObj = &p->matchFinderMt;
++ MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
++ }
++ else
++ #endif
++ {
++ if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig))
++ return SZ_ERROR_MEM;
++ p->matchFinderObj = &p->matchFinderBase;
++ MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder);
++ }
++ return SZ_OK;
++}
++
++void LzmaEnc_Init(CLzmaEnc *p)
++{
++ UInt32 i;
++ p->state = 0;
++ for (i = 0 ; i < LZMA_NUM_REPS; i++)
++ p->reps[i] = 0;
++
++ RangeEnc_Init(&p->rc);
++
++
++ for (i = 0; i < kNumStates; i++)
++ {
++ UInt32 j;
++ for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
++ {
++ p->isMatch[i][j] = kProbInitValue;
++ p->isRep0Long[i][j] = kProbInitValue;
++ }
++ p->isRep[i] = kProbInitValue;
++ p->isRepG0[i] = kProbInitValue;
++ p->isRepG1[i] = kProbInitValue;
++ p->isRepG2[i] = kProbInitValue;
++ }
++
++ {
++ UInt32 num = 0x300 << (p->lp + p->lc);
++ for (i = 0; i < num; i++)
++ p->litProbs[i] = kProbInitValue;
++ }
++
++ {
++ for (i = 0; i < kNumLenToPosStates; i++)
++ {
++ CLzmaProb *probs = p->posSlotEncoder[i];
++ UInt32 j;
++ for (j = 0; j < (1 << kNumPosSlotBits); j++)
++ probs[j] = kProbInitValue;
++ }
++ }
++ {
++ for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++)
++ p->posEncoders[i] = kProbInitValue;
++ }
++
++ LenEnc_Init(&p->lenEnc.p);
++ LenEnc_Init(&p->repLenEnc.p);
++
++ for (i = 0; i < (1 << kNumAlignBits); i++)
++ p->posAlignEncoder[i] = kProbInitValue;
++
++ p->optimumEndIndex = 0;
++ p->optimumCurrentIndex = 0;
++ p->additionalOffset = 0;
++
++ p->pbMask = (1 << p->pb) - 1;
++ p->lpMask = (1 << p->lp) - 1;
++}
++
++void LzmaEnc_InitPrices(CLzmaEnc *p)
++{
++ if (!p->fastMode)
++ {
++ FillDistancesPrices(p);
++ FillAlignPrices(p);
++ }
++
++ p->lenEnc.tableSize =
++ p->repLenEnc.tableSize =
++ p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
++ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices);
++ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices);
++}
++
++static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ UInt32 i;
++ for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++)
++ if (p->dictSize <= ((UInt32)1 << i))
++ break;
++ p->distTableSize = i * 2;
++
++ p->finished = False;
++ p->result = SZ_OK;
++ RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig));
++ LzmaEnc_Init(p);
++ LzmaEnc_InitPrices(p);
++ p->nowPos64 = 0;
++ return SZ_OK;
++}
++
++static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ p->matchFinderBase.stream = inStream;
++ p->needInit = 1;
++ p->rc.outStream = outStream;
++ return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
++}
++
++SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
++ ISeqInStream *inStream, UInt32 keepWindowSize,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ p->matchFinderBase.stream = inStream;
++ p->needInit = 1;
++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
++}
++
++static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
++{
++ p->matchFinderBase.directInput = 1;
++ p->matchFinderBase.bufferBase = (Byte *)src;
++ p->matchFinderBase.directInputRem = srcLen;
++}
++
++SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
++ UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ LzmaEnc_SetInputBuf(p, src, srcLen);
++ p->needInit = 1;
++
++ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
++}
++
++void LzmaEnc_Finish(CLzmaEncHandle pp)
++{
++ #ifndef _7ZIP_ST
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ if (p->mtMode)
++ MatchFinderMt_ReleaseStream(&p->matchFinderMt);
++ #else
++  pp = pp; /* silence the unused-parameter warning */
++ #endif
++}
++
++typedef struct
++{
++ ISeqOutStream funcTable;
++ Byte *data;
++ SizeT rem;
++ Bool overflow;
++} CSeqOutStreamBuf;
++
++static size_t MyWrite(void *pp, const void *data, size_t size)
++{
++ CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp;
++ if (p->rem < size)
++ {
++ size = p->rem;
++ p->overflow = True;
++ }
++ memcpy(p->data, data, size);
++ p->rem -= size;
++ p->data += size;
++ return size;
++}
++
++
++UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
++{
++ const CLzmaEnc *p = (CLzmaEnc *)pp;
++ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
++}
++
++const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
++{
++ const CLzmaEnc *p = (CLzmaEnc *)pp;
++ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
++}
++
++SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
++ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ UInt64 nowPos64;
++ SRes res;
++ CSeqOutStreamBuf outStream;
++
++ outStream.funcTable.Write = MyWrite;
++ outStream.data = dest;
++ outStream.rem = *destLen;
++ outStream.overflow = False;
++
++ p->writeEndMark = False;
++ p->finished = False;
++ p->result = SZ_OK;
++
++ if (reInit)
++ LzmaEnc_Init(p);
++ LzmaEnc_InitPrices(p);
++ nowPos64 = p->nowPos64;
++ RangeEnc_Init(&p->rc);
++ p->rc.outStream = &outStream.funcTable;
++
++ res = LzmaEnc_CodeOneBlock(p, True, desiredPackSize, *unpackSize);
++
++ *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
++ *destLen -= outStream.rem;
++ if (outStream.overflow)
++ return SZ_ERROR_OUTPUT_EOF;
++
++ return res;
++}
++
++static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
++{
++ SRes res = SZ_OK;
++
++ #ifndef _7ZIP_ST
++ Byte allocaDummy[0x300];
++ int i = 0;
++ for (i = 0; i < 16; i++)
++ allocaDummy[i] = (Byte)i;
++ #endif
++
++ for (;;)
++ {
++ res = LzmaEnc_CodeOneBlock(p, False, 0, 0);
++ if (res != SZ_OK || p->finished != 0)
++ break;
++ if (progress != 0)
++ {
++ res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
++ if (res != SZ_OK)
++ {
++ res = SZ_ERROR_PROGRESS;
++ break;
++ }
++ }
++ }
++ LzmaEnc_Finish(p);
++ return res;
++}
++
++SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress,
++ ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig));
++ return LzmaEnc_Encode2((CLzmaEnc *)pp, progress);
++}
++
++SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
++{
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++ int i;
++ UInt32 dictSize = p->dictSize;
++ if (*size < LZMA_PROPS_SIZE)
++ return SZ_ERROR_PARAM;
++ *size = LZMA_PROPS_SIZE;
++ props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
++  /* round dictSize up to the nearest 2^n or 3*2^n before storing it */
++ for (i = 11; i <= 30; i++)
++ {
++ if (dictSize <= ((UInt32)2 << i))
++ {
++ dictSize = (2 << i);
++ break;
++ }
++ if (dictSize <= ((UInt32)3 << i))
++ {
++ dictSize = (3 << i);
++ break;
++ }
++ }
++
++ for (i = 0; i < 4; i++)
++ props[1 + i] = (Byte)(dictSize >> (8 * i));
++ return SZ_OK;
++}
++
++SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ SRes res;
++ CLzmaEnc *p = (CLzmaEnc *)pp;
++
++ CSeqOutStreamBuf outStream;
++
++ LzmaEnc_SetInputBuf(p, src, srcLen);
++
++ outStream.funcTable.Write = MyWrite;
++ outStream.data = dest;
++ outStream.rem = *destLen;
++ outStream.overflow = False;
++
++ p->writeEndMark = writeEndMark;
++
++ p->rc.outStream = &outStream.funcTable;
++ res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig);
++ if (res == SZ_OK)
++ res = LzmaEnc_Encode2(p, progress);
++
++ *destLen -= outStream.rem;
++ if (outStream.overflow)
++ return SZ_ERROR_OUTPUT_EOF;
++ return res;
++}
++
++SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
++ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
++ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
++{
++ CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc);
++ SRes res;
++ if (p == 0)
++ return SZ_ERROR_MEM;
++
++ res = LzmaEnc_SetProps(p, props);
++ if (res == SZ_OK)
++ {
++ res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize);
++ if (res == SZ_OK)
++ res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen,
++ writeEndMark, progress, alloc, allocBig);
++ }
++
++ LzmaEnc_Destroy(p, alloc, allocBig);
++ return res;
++}
+--- /dev/null
++++ b/lib/lzma/Makefile
+@@ -0,0 +1,7 @@
++lzma_compress-objs := LzFind.o LzmaEnc.o
++lzma_decompress-objs := LzmaDec.o
++
++obj-$(CONFIG_LZMA_COMPRESS) += lzma_compress.o
++obj-$(CONFIG_LZMA_DECOMPRESS) += lzma_decompress.o
++
++EXTRA_CFLAGS += -Iinclude/linux -Iinclude/linux/lzma -include types.h
diff --git a/target/linux/generic/pending-5.10/532-jffs2_eofdetect.patch b/target/linux/generic/pending-5.10/532-jffs2_eofdetect.patch
new file mode 100644
index 0000000000..a1d7dc1a69
--- /dev/null
+++ b/target/linux/generic/pending-5.10/532-jffs2_eofdetect.patch
@@ -0,0 +1,65 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: fs: jffs2: EOF marker
+
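+The end-of-filesystem marker is the four-byte sequence de ad c0 de
+("deadc0de") at the start of an eraseblock. Once the scan finds it, bit 7
+of c->flags serves as an ad-hoc "marker seen" flag: the remaining
+eraseblocks are reported as empty instead of being scanned, and they are
+erased once the scan completes. Editor's sketch of the detection,
+mirroring the scan.c hunk below:
+
+	/* buf holds the first bytes of the eraseblock being scanned */
+	if (buf[0] == 0xde && buf[1] == 0xad &&
+	    buf[2] == 0xc0 && buf[3] == 0xde)
+		return BLK_STATE_ALLFF; /* treat the rest as empty */
+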
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ fs/jffs2/build.c | 10 ++++++++++
+ fs/jffs2/scan.c | 21 +++++++++++++++++++--
+ 2 files changed, 29 insertions(+), 2 deletions(-)
+
+--- a/fs/jffs2/build.c
++++ b/fs/jffs2/build.c
+@@ -117,6 +117,16 @@ static int jffs2_build_filesystem(struct
+ dbg_fsbuild("scanned flash completely\n");
+ jffs2_dbg_dump_block_lists_nolock(c);
+
++ if (c->flags & (1 << 7)) {
++ printk("%s(): unlocking the mtd device... ", __func__);
++ mtd_unlock(c->mtd, 0, c->mtd->size);
++ printk("done.\n");
++
++ printk("%s(): erasing all blocks after the end marker... ", __func__);
++ jffs2_erase_pending_blocks(c, -1);
++ printk("done.\n");
++ }
++
+ dbg_fsbuild("pass 1 starting\n");
+ c->flags |= JFFS2_SB_FLAG_BUILDING;
+ /* Now scan the directory tree, increasing nlink according to every dirent found. */
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -148,8 +148,14 @@ int jffs2_scan_medium(struct jffs2_sb_in
+ /* reset summary info for next eraseblock scan */
+ jffs2_sum_reset_collected(s);
+
+- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+- buf_size, s);
++ if (c->flags & (1 << 7)) {
++ if (mtd_block_isbad(c->mtd, jeb->offset))
++ ret = BLK_STATE_BADBLOCK;
++ else
++ ret = BLK_STATE_ALLFF;
++ } else
++ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
++ buf_size, s);
+
+ if (ret < 0)
+ goto out;
+@@ -565,6 +571,17 @@ full_scan:
+ return err;
+ }
+
++ if ((buf[0] == 0xde) &&
++ (buf[1] == 0xad) &&
++ (buf[2] == 0xc0) &&
++ (buf[3] == 0xde)) {
++ /* end of filesystem. erase everything after this point */
++ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
++ c->flags |= (1 << 7);
++
++ return BLK_STATE_ALLFF;
++ }
++
+ /* We temporarily use 'ofs' as a pointer into the buffer/jeb */
+ ofs = 0;
+ max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
diff --git a/target/linux/generic/pending-5.10/600-netfilter_conntrack_flush.patch b/target/linux/generic/pending-5.10/600-netfilter_conntrack_flush.patch
new file mode 100644
index 0000000000..f82849c356
--- /dev/null
+++ b/target/linux/generic/pending-5.10/600-netfilter_conntrack_flush.patch
@@ -0,0 +1,88 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: add support for flushing conntrack via /proc
+
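+Writing an address to /proc/net/nf_conntrack kills every entry whose
+original or reply tuple involves that address; writing a string that is
+neither an IPv4 nor an IPv6 address flushes the whole table. Usage
+sketch (editor's example, not part of the original commit):
+
+	/* drop all conntrack entries involving 192.168.1.20 */
+	int fd = open("/proc/net/nf_conntrack", O_WRONLY);
+
+	if (fd >= 0) {
+		write(fd, "192.168.1.20\n", 13);
+		close(fd);
+	}
+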
+lede-commit 8193bbe59a74d34d6a26d4a8cb857b1952905314
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/netfilter/nf_conntrack_standalone.c | 59 ++++++++++++++++++++++++++++++++-
+ 1 file changed, 58 insertions(+), 1 deletion(-)
+
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -9,6 +9,7 @@
+ #include <linux/percpu.h>
+ #include <linux/netdevice.h>
+ #include <linux/security.h>
++#include <linux/inet.h>
+ #include <net/net_namespace.h>
+ #ifdef CONFIG_SYSCTL
+ #include <linux/sysctl.h>
+@@ -456,6 +457,56 @@ static int ct_cpu_seq_show(struct seq_fi
+ return 0;
+ }
+
++struct kill_request {
++ u16 family;
++ union nf_inet_addr addr;
++};
++
++static int kill_matching(struct nf_conn *i, void *data)
++{
++ struct kill_request *kr = data;
++ struct nf_conntrack_tuple *t1 = &i->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
++ struct nf_conntrack_tuple *t2 = &i->tuplehash[IP_CT_DIR_REPLY].tuple;
++
++ if (!kr->family)
++ return 1;
++
++ if (t1->src.l3num != kr->family)
++ return 0;
++
++ return (nf_inet_addr_cmp(&kr->addr, &t1->src.u3) ||
++ nf_inet_addr_cmp(&kr->addr, &t1->dst.u3) ||
++ nf_inet_addr_cmp(&kr->addr, &t2->src.u3) ||
++ nf_inet_addr_cmp(&kr->addr, &t2->dst.u3));
++}
++
++static int ct_file_write(struct file *file, char *buf, size_t count)
++{
++ struct seq_file *seq = file->private_data;
++ struct net *net = seq_file_net(seq);
++ struct kill_request kr = { };
++
++ if (count == 0)
++ return 0;
++
++ if (count >= INET6_ADDRSTRLEN)
++ count = INET6_ADDRSTRLEN - 1;
++
++ if (strnchr(buf, count, ':')) {
++ kr.family = AF_INET6;
++ if (!in6_pton(buf, count, (void *)&kr.addr, '\n', NULL))
++ return -EINVAL;
++ } else if (strnchr(buf, count, '.')) {
++ kr.family = AF_INET;
++ if (!in4_pton(buf, count, (void *)&kr.addr, '\n', NULL))
++ return -EINVAL;
++ }
++
++ nf_ct_iterate_cleanup_net(net, kill_matching, &kr, 0, 0);
++
++ return 0;
++}
++
+ static const struct seq_operations ct_cpu_seq_ops = {
+ .start = ct_cpu_seq_start,
+ .next = ct_cpu_seq_next,
+@@ -469,8 +520,9 @@ static int nf_conntrack_standalone_init_
+ kuid_t root_uid;
+ kgid_t root_gid;
+
+- pde = proc_create_net("nf_conntrack", 0440, net->proc_net, &ct_seq_ops,
+- sizeof(struct ct_iter_state));
++ pde = proc_create_net_data_write("nf_conntrack", 0440, net->proc_net,
++ &ct_seq_ops, &ct_file_write,
++ sizeof(struct ct_iter_state), NULL);
+ if (!pde)
+ goto out_nf_conntrack;
+
diff --git a/target/linux/generic/pending-5.10/610-netfilter_match_bypass_default_checks.patch b/target/linux/generic/pending-5.10/610-netfilter_match_bypass_default_checks.patch
new file mode 100644
index 0000000000..04f9ba2f08
--- /dev/null
+++ b/target/linux/generic/pending-5.10/610-netfilter_match_bypass_default_checks.patch
@@ -0,0 +1,110 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a new version of my netfilter speedup patches for linux 2.6.39 and 3.0
+
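+A rule qualifies for the bypass when it uses none of the implicit IP
+matches: no source/destination address or mask, no interface patterns,
+no protocol and no fragment or inversion flags. ip_checkdefault() then
+sets IPT_F_NO_DEF_MATCH on such rules so that ip_packet_match() can
+accept the packet immediately (editor's summary of the fast path added
+below):
+
+	if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+		return true; /* rule has no IP header matches at all */
+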
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/uapi/linux/netfilter_ipv4/ip_tables.h | 1 +
+ net/ipv4/netfilter/ip_tables.c | 37 +++++++++++++++++++++++++++
+ 2 files changed, 38 insertions(+)
+
+--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
++++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
+@@ -89,6 +89,7 @@ struct ipt_ip {
+ #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
+ #define IPT_F_GOTO 0x02 /* Set if jump is a goto */
+ #define IPT_F_MASK 0x03 /* All possible flag bits mask. */
++#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */
+
+ /* Values for "inv" field in struct ipt_ip. */
+ #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -50,6 +50,9 @@ ip_packet_match(const struct iphdr *ip,
+ {
+ unsigned long ret;
+
++ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
++ return true;
++
+ if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+ (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+ NF_INVF(ipinfo, IPT_INV_DSTIP,
+@@ -80,6 +83,29 @@ ip_packet_match(const struct iphdr *ip,
+ return true;
+ }
+
++static void
++ip_checkdefault(struct ipt_ip *ip)
++{
++ static const char iface_mask[IFNAMSIZ] = {};
++
++ if (ip->invflags || ip->flags & IPT_F_FRAG)
++ return;
++
++ if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
++ return;
++
++ if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
++ return;
++
++ if (ip->smsk.s_addr || ip->dmsk.s_addr)
++ return;
++
++ if (ip->proto)
++ return;
++
++ ip->flags |= IPT_F_NO_DEF_MATCH;
++}
++
+ static bool
+ ip_checkentry(const struct ipt_ip *ip)
+ {
+@@ -524,6 +550,8 @@ find_check_entry(struct ipt_entry *e, st
+ struct xt_mtchk_param mtpar;
+ struct xt_entry_match *ematch;
+
++ ip_checkdefault(&e->ip);
++
+ if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
+ return -ENOMEM;
+
+@@ -818,6 +846,7 @@ copy_entries_to_user(unsigned int total_
+ const struct xt_table_info *private = xt_table_get_private_protected(table);
+ int ret = 0;
+ const void *loc_cpu_entry;
++ u8 flags;
+
+ counters = alloc_counters(table);
+ if (IS_ERR(counters))
+@@ -845,6 +874,14 @@ copy_entries_to_user(unsigned int total_
+ goto free_counters;
+ }
+
++ flags = e->ip.flags & IPT_F_MASK;
++ if (copy_to_user(userptr + off
++ + offsetof(struct ipt_entry, ip.flags),
++ &flags, sizeof(flags)) != 0) {
++ ret = -EFAULT;
++ goto free_counters;
++ }
++
+ for (i = sizeof(struct ipt_entry);
+ i < e->target_offset;
+ i += m->u.match_size) {
+@@ -1223,12 +1260,15 @@ compat_copy_entry_to_user(struct ipt_ent
+ compat_uint_t origsize;
+ const struct xt_entry_match *ematch;
+ int ret = 0;
++ u8 flags = e->ip.flags & IPT_F_MASK;
+
+ origsize = *size;
+ ce = *dstptr;
+ if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
+ copy_to_user(&ce->counters, &counters[i],
+- sizeof(counters[i])) != 0)
++ sizeof(counters[i])) != 0 ||
++ copy_to_user(&ce->ip.flags, &flags,
++ sizeof(flags)) != 0)
+ return -EFAULT;
+
+ *dstptr += sizeof(struct compat_ipt_entry);
diff --git a/target/linux/generic/pending-5.10/611-netfilter_match_bypass_default_table.patch b/target/linux/generic/pending-5.10/611-netfilter_match_bypass_default_table.patch
new file mode 100644
index 0000000000..1792534835
--- /dev/null
+++ b/target/linux/generic/pending-5.10/611-netfilter_match_bypass_default_table.patch
@@ -0,0 +1,106 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: match bypass default table
+
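+Standard targets encode built-in verdicts as -(verdict + 1), so the
+decoding in ipt_handle_default_rule() recovers the NF_* value. Worked
+example (editor's note): an unconditional ACCEPT policy stores
+st->verdict == -(NF_ACCEPT + 1) == -2, so
+
+	*verdict = (unsigned)(-st->verdict) - 1; /* 2 - 1 == NF_ACCEPT */
+
+XT_RETURN and non-negative verdicts (offsets used for jumps) are
+rejected, so only terminal built-in policies take the bypass.
+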
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/ipv4/netfilter/ip_tables.c | 79 +++++++++++++++++++++++++++++++-----------
+ 1 file changed, 58 insertions(+), 21 deletions(-)
+
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -246,6 +246,33 @@ struct ipt_entry *ipt_next_entry(const s
+ return (void *)entry + entry->next_offset;
+ }
+
++static bool
++ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
++{
++ struct xt_entry_target *t;
++ struct xt_standard_target *st;
++
++ if (e->target_offset != sizeof(struct ipt_entry))
++ return false;
++
++ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
++ return false;
++
++ t = ipt_get_target(e);
++ if (t->u.kernel.target->target)
++ return false;
++
++ st = (struct xt_standard_target *) t;
++ if (st->verdict == XT_RETURN)
++ return false;
++
++ if (st->verdict >= 0)
++ return false;
++
++ *verdict = (unsigned)(-st->verdict) - 1;
++ return true;
++}
++
+ /* Returns one of the generic firewall policies, like NF_ACCEPT. */
+ unsigned int
+ ipt_do_table(struct sk_buff *skb,
+@@ -266,27 +293,28 @@ ipt_do_table(struct sk_buff *skb,
+ unsigned int addend;
+
+ /* Initialization */
++ WARN_ON(!(table->valid_hooks & (1 << hook)));
++ local_bh_disable();
++ private = rcu_access_pointer(table->private);
++ cpu = smp_processor_id();
++ table_base = private->entries;
++
++ e = get_entry(table_base, private->hook_entry[hook]);
++ if (ipt_handle_default_rule(e, &verdict)) {
++ struct xt_counters *counter;
++
++ counter = xt_get_this_cpu_counter(&e->counters);
++ ADD_COUNTER(*counter, skb->len, 1);
++ local_bh_enable();
++ return verdict;
++ }
++
+ stackidx = 0;
+ ip = ip_hdr(skb);
+ indev = state->in ? state->in->name : nulldevname;
+ outdev = state->out ? state->out->name : nulldevname;
+- /* We handle fragments by dealing with the first fragment as
+- * if it was a normal packet. All other fragments are treated
+- * normally, except that they will NEVER match rules that ask
+- * things we don't know, ie. tcp syn flag or ports). If the
+- * rule is also a fragment-specific rule, non-fragments won't
+- * match it. */
+- acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+- acpar.thoff = ip_hdrlen(skb);
+- acpar.hotdrop = false;
+- acpar.state = state;
+
+- WARN_ON(!(table->valid_hooks & (1 << hook)));
+- local_bh_disable();
+ addend = xt_write_recseq_begin();
+- private = rcu_access_pointer(table->private);
+- cpu = smp_processor_id();
+- table_base = private->entries;
+ jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
+
+ /* Switch to alternate jumpstack if we're being invoked via TEE.
+@@ -299,7 +327,16 @@ ipt_do_table(struct sk_buff *skb,
+ if (static_key_false(&xt_tee_enabled))
+ jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
+
+- e = get_entry(table_base, private->hook_entry[hook]);
++ /* We handle fragments by dealing with the first fragment as
++ * if it was a normal packet. All other fragments are treated
++ * normally, except that they will NEVER match rules that ask
++ * things we don't know, ie. tcp syn flag or ports). If the
++ * rule is also a fragment-specific rule, non-fragments won't
++ * match it. */
++ acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
++ acpar.thoff = ip_hdrlen(skb);
++ acpar.hotdrop = false;
++ acpar.state = state;
+
+ do {
+ const struct xt_entry_target *t;
diff --git a/target/linux/generic/pending-5.10/612-netfilter_match_reduce_memory_access.patch b/target/linux/generic/pending-5.10/612-netfilter_match_reduce_memory_access.patch
new file mode 100644
index 0000000000..79da6778b6
--- /dev/null
+++ b/target/linux/generic/pending-5.10/612-netfilter_match_reduce_memory_access.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: reduce match memory access
+
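+When the address mask is 0.0.0.0 the comparison can never exclude a
+packet, so the added "ipinfo->smsk.s_addr &&" term lets the condition
+short-circuit without loading ip->saddr from the packet header. Worked
+example (editor's note): a rule with -s 0.0.0.0/0 has smsk.s_addr == 0,
+so
+
+	ipinfo->smsk.s_addr &&
+	(ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr
+
+evaluates to false before the packet is touched, and NF_INVF() does not
+reject the packet as long as IPT_INV_SRCIP is unset.
+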
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/ipv4/netfilter/ip_tables.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -53,9 +53,9 @@ ip_packet_match(const struct iphdr *ip,
+ if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+ return true;
+
+- if (NF_INVF(ipinfo, IPT_INV_SRCIP,
++ if (NF_INVF(ipinfo, IPT_INV_SRCIP, ipinfo->smsk.s_addr &&
+ (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+- NF_INVF(ipinfo, IPT_INV_DSTIP,
++ NF_INVF(ipinfo, IPT_INV_DSTIP, ipinfo->dmsk.s_addr &&
+ (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
+ return false;
+
diff --git a/target/linux/generic/pending-5.10/613-netfilter_optional_tcp_window_check.patch b/target/linux/generic/pending-5.10/613-netfilter_optional_tcp_window_check.patch
new file mode 100644
index 0000000000..61776921c6
--- /dev/null
+++ b/target/linux/generic/pending-5.10/613-netfilter_optional_tcp_window_check.patch
@@ -0,0 +1,73 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: netfilter: optional tcp window check
+
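+The check defaults to off (nf_ct_tcp_no_window_check = 1), i.e. window
+validation is skipped. Usage sketch (editor's example) for turning
+strict window tracking back on at runtime via the new sysctl:
+
+	/* same as: sysctl -w net.netfilter.nf_conntrack_tcp_no_window_check=0 */
+	int fd = open("/proc/sys/net/netfilter/nf_conntrack_tcp_no_window_check",
+		      O_WRONLY);
+
+	if (fd >= 0) {
+		write(fd, "0\n", 2);
+		close(fd);
+	}
+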
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ net/netfilter/nf_conntrack_proto_tcp.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -31,6 +31,9 @@
+ #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+ #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+
++/* Do not check the TCP window for incoming packets */
++static int nf_ct_tcp_no_window_check __read_mostly = 1;
++
+ /* "Be conservative in what you do,
+ be liberal in what you accept from others."
+ If it's non-zero, we mark only out of window RST segments as INVALID. */
+@@ -476,6 +479,9 @@ static bool tcp_in_window(const struct n
+ s32 receiver_offset;
+ bool res, in_recv_win;
+
++ if (nf_ct_tcp_no_window_check)
++ return true;
++
+ /*
+ * Get the required data from the packet.
+ */
+@@ -1130,7 +1136,7 @@ int nf_conntrack_tcp_packet(struct nf_co
+ IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
+ timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
+ timeout = timeouts[TCP_CONNTRACK_UNACK];
+- else if (ct->proto.tcp.last_win == 0 &&
++ else if (!nf_ct_tcp_no_window_check && ct->proto.tcp.last_win == 0 &&
+ timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
+ timeout = timeouts[TCP_CONNTRACK_RETRANS];
+ else
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -25,6 +25,9 @@
+ #include <net/netfilter/nf_conntrack_timestamp.h>
+ #include <linux/rculist_nulls.h>
+
++/* Do not check the TCP window for incoming packets */
++static int nf_ct_tcp_no_window_check __read_mostly = 1;
++
+ static bool enable_hooks __read_mostly;
+ MODULE_PARM_DESC(enable_hooks, "Always enable conntrack hooks");
+ module_param(enable_hooks, bool, 0000);
+@@ -651,6 +654,7 @@ enum nf_ct_sysctl_index {
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
+ #endif
+
++ NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK,
+ __NF_SYSCTL_CT_LAST_SYSCTL,
+ };
+
+@@ -977,6 +981,13 @@ static struct ctl_table nf_ct_sysctl_tab
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ #endif
++ [NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK] = {
++ .procname = "nf_conntrack_tcp_no_window_check",
++ .data = &nf_ct_tcp_no_window_check,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
+ {}
+ };
+
diff --git a/target/linux/generic/pending-5.10/620-net_sched-codel-do-not-defer-queue-length-update.patch b/target/linux/generic/pending-5.10/620-net_sched-codel-do-not-defer-queue-length-update.patch
new file mode 100644
index 0000000000..4b4825ae3b
--- /dev/null
+++ b/target/linux/generic/pending-5.10/620-net_sched-codel-do-not-defer-queue-length-update.patch
@@ -0,0 +1,86 @@
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Mon, 21 Aug 2017 11:14:14 +0300
+Subject: [PATCH] net_sched/codel: do not defer queue length update
+
+When codel wants to drop the last packet in ->dequeue() it cannot call
+qdisc_tree_reduce_backlog() right away - that would notify the parent
+qdisc of a zero qlen, and HTB/HFSC would deactivate the class. The same
+class would then be deactivated a second time by the caller of
+->dequeue(). Currently codel and fq_codel defer the update. This triggers
+a warning in HFSC when its qlen != 0 but it has no active classes.
+
+This patch updates the parent queue length immediately: it temporarily
+increases qlen around qdisc_tree_reduce_backlog() to prevent the first
+class deactivation if we have an skb to return.
+
+This might open another problem in HFSC - the peek operation could now
+fail and deactivate the parent class.
+
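+The pattern, in short (editor's sketch of the hunks below): bump qlen so
+the parent still sees the class as active while the backlog update runs,
+then restore it:
+
+	if (q->stats.drop_count) {
+		if (skb)
+			sch->q.qlen++;	/* keep parent class active */
+		qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
+					  q->stats.drop_len);
+		if (skb)
+			sch->q.qlen--;	/* caller accounts for the skb */
+		q->stats.drop_count = 0;
+		q->stats.drop_len = 0;
+	}
+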
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=109581
+---
+
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -95,11 +95,17 @@ static struct sk_buff *codel_qdisc_deque
+ &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
+ drop_func, dequeue_func);
+
+- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+- * or HTB crashes. Defer it for next round.
++ /* If our qlen is 0, qdisc_tree_reduce_backlog() will deactivate the
++ * parent class; dequeue in the parent qdisc will do the same if we
++ * return an skb. Temporarily increment qlen if we have one.
+ */
+- if (q->stats.drop_count && sch->q.qlen) {
+- qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
++ if (q->stats.drop_count) {
++ if (skb)
++ sch->q.qlen++;
++ qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
++ q->stats.drop_len);
++ if (skb)
++ sch->q.qlen--;
+ q->stats.drop_count = 0;
+ q->stats.drop_len = 0;
+ }
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -304,6 +304,21 @@ begin:
+ &flow->cvars, &q->cstats, qdisc_pkt_len,
+ codel_get_enqueue_time, drop_func, dequeue_func);
+
++ /* If our qlen is 0, qdisc_tree_reduce_backlog() will deactivate the
++ * parent class; dequeue in the parent qdisc will do the same if we
++ * return an skb. Temporarily increment qlen if we have one.
++ */
++ if (q->cstats.drop_count) {
++ if (skb)
++ sch->q.qlen++;
++ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
++ q->cstats.drop_len);
++ if (skb)
++ sch->q.qlen--;
++ q->cstats.drop_count = 0;
++ q->cstats.drop_len = 0;
++ }
++
+ if (!skb) {
+ /* force a pass through old_flows to prevent starvation */
+ if ((head == &q->new_flows) && !list_empty(&q->old_flows))
+@@ -314,15 +329,6 @@ begin:
+ }
+ qdisc_bstats_update(sch, skb);
+ flow->deficit -= qdisc_pkt_len(skb);
+- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+- * or HTB crashes. Defer it for next round.
+- */
+- if (q->cstats.drop_count && sch->q.qlen) {
+- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+- q->cstats.drop_len);
+- q->cstats.drop_count = 0;
+- q->cstats.drop_len = 0;
+- }
+ return skb;
+ }
+
diff --git a/target/linux/generic/pending-5.10/630-packet_socket_type.patch b/target/linux/generic/pending-5.10/630-packet_socket_type.patch
new file mode 100644
index 0000000000..104bcbe5a5
--- /dev/null
+++ b/target/linux/generic/pending-5.10/630-packet_socket_type.patch
@@ -0,0 +1,138 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: net: add an optimization for dealing with raw sockets
+
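+Usage sketch (editor's example, not part of the original commit): a
+capture process that only cares about frames addressed to the local
+host can ask the kernel to drop everything else before it reaches the
+socket queue:
+
+	unsigned int mask = (1 << PACKET_HOST) | (1 << PACKET_BROADCAST);
+
+	setsockopt(fd, SOL_PACKET, PACKET_RECV_TYPE, &mask, sizeof(mask));
+
+The default mask accepts every packet type except PACKET_LOOPBACK,
+matching the previous behaviour.
+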
+lede-commit: 4898039703d7315f0f3431c860123338ec3be0f6
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/uapi/linux/if_packet.h | 3 +++
+ net/packet/af_packet.c | 34 +++++++++++++++++++++++++++-------
+ net/packet/internal.h | 1 +
+ 3 files changed, 31 insertions(+), 7 deletions(-)
+
+--- a/include/uapi/linux/if_packet.h
++++ b/include/uapi/linux/if_packet.h
+@@ -32,6 +32,8 @@ struct sockaddr_ll {
+ #define PACKET_KERNEL 7 /* To kernel space */
+ /* Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space */
+ #define PACKET_FASTROUTE 6 /* Fastrouted frame */
++#define PACKET_MASK_ANY 0xffffffff /* mask for packet type bits */
++
+
+ /* Packet socket options */
+
+@@ -58,6 +60,7 @@ struct sockaddr_ll {
+ #define PACKET_ROLLOVER_STATS 21
+ #define PACKET_FANOUT_DATA 22
+ #define PACKET_IGNORE_OUTGOING 23
++#define PACKET_RECV_TYPE 24
+
+ #define PACKET_FANOUT_HASH 0
+ #define PACKET_FANOUT_LB 1
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1801,6 +1801,7 @@ static int packet_rcv_spkt(struct sk_buf
+ {
+ struct sock *sk;
+ struct sockaddr_pkt *spkt;
++ struct packet_sock *po;
+
+ /*
+ * When we registered the protocol we saved the socket in the data
+@@ -1808,6 +1809,7 @@ static int packet_rcv_spkt(struct sk_buf
+ */
+
+ sk = pt->af_packet_priv;
++ po = pkt_sk(sk);
+
+ /*
+ * Yank back the headers [hope the device set this
+@@ -1820,7 +1822,7 @@ static int packet_rcv_spkt(struct sk_buf
+ * so that this procedure is noop.
+ */
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
+ goto out;
+
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+@@ -2058,12 +2060,12 @@ static int packet_rcv(struct sk_buff *sk
+ unsigned int snaplen, res;
+ bool is_drop_n_account = false;
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
+- goto drop;
+-
+ sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
+
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
++ goto drop;
++
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ goto drop;
+
+@@ -2189,12 +2191,12 @@ static int tpacket_rcv(struct sk_buff *s
+ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
+ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
+
+- if (skb->pkt_type == PACKET_LOOPBACK)
+- goto drop;
+-
+ sk = pt->af_packet_priv;
+ po = pkt_sk(sk);
+
++ if (!(po->pkt_type & (1 << skb->pkt_type)))
++ goto drop;
++
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ goto drop;
+
+@@ -3293,6 +3295,7 @@ static int packet_create(struct net *net
+ mutex_init(&po->pg_vec_lock);
+ po->rollover = NULL;
+ po->prot_hook.func = packet_rcv;
++ po->pkt_type = PACKET_MASK_ANY & ~(1 << PACKET_LOOPBACK);
+
+ if (sock->type == SOCK_PACKET)
+ po->prot_hook.func = packet_rcv_spkt;
+@@ -3927,6 +3930,16 @@ packet_setsockopt(struct socket *sock, i
+ po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
+ return 0;
+ }
++ case PACKET_RECV_TYPE:
++ {
++ unsigned int val;
++ if (optlen != sizeof(val))
++ return -EINVAL;
++ if (copy_from_sockptr(&val, optval, sizeof(val)))
++ return -EFAULT;
++ po->pkt_type = val & ~BIT(PACKET_LOOPBACK);
++ return 0;
++ }
+ default:
+ return -ENOPROTOOPT;
+ }
+@@ -3983,6 +3996,13 @@ static int packet_getsockopt(struct sock
+ case PACKET_VNET_HDR:
+ val = po->has_vnet_hdr;
+ break;
++ case PACKET_RECV_TYPE:
++ if (len > sizeof(unsigned int))
++ len = sizeof(unsigned int);
++ val = po->pkt_type;
++
++ data = &val;
++ break;
+ case PACKET_VERSION:
+ val = po->tp_version;
+ break;
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -136,6 +136,7 @@ struct packet_sock {
+ int (*xmit)(struct sk_buff *skb);
+ struct packet_type prot_hook ____cacheline_aligned_in_smp;
+ atomic_t tp_drops ____cacheline_aligned_in_smp;
++ unsigned int pkt_type;
+ };
+
+ static struct packet_sock *pkt_sk(struct sock *sk)
diff --git a/target/linux/generic/pending-5.10/640-00-netfilter-flowtable-add-hash-offset-field-to-tuple.patch b/target/linux/generic/pending-5.10/640-00-netfilter-flowtable-add-hash-offset-field-to-tuple.patch
new file mode 100644
index 0000000000..c881ccfcb0
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-00-netfilter-flowtable-add-hash-offset-field-to-tuple.patch
@@ -0,0 +1,52 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 20 Nov 2020 13:49:13 +0100
+Subject: [PATCH] netfilter: flowtable: add hash offset field to tuple
+
+Add a placeholder field to calculate hash tuple offset. Similar to
+2c407aca6497 ("netfilter: conntrack: avoid gcc-10 zero-length-bounds
+warning").
+
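+The placeholder is an empty struct: it occupies no space in the tuple
+but gives offsetof() a stable name for the end of the hashed region.
+Editor's sketch of the idiom:
+
+	struct example_key {
+		u32 a, b;		/* lookup key fields, hashed */
+		struct { } __hash;	/* zero-size end-of-key marker */
+		u8 dir;			/* not part of the hash */
+	};
+
+	hash = jhash(key, offsetof(struct example_key, __hash), seed);
+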
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -107,6 +107,10 @@ struct flow_offload_tuple {
+
+ u8 l3proto;
+ u8 l4proto;
++
++ /* All members above are keys for lookups, see flow_offload_hash(). */
++ struct { } __hash;
++
+ u8 dir;
+
+ u16 mtu;
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -191,14 +191,14 @@ static u32 flow_offload_hash(const void
+ {
+ const struct flow_offload_tuple *tuple = data;
+
+- return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
++ return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
+ }
+
+ static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
+ {
+ const struct flow_offload_tuple_rhash *tuplehash = data;
+
+- return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
++ return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
+ }
+
+ static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
+@@ -207,7 +207,7 @@ static int flow_offload_hash_cmp(struct
+ const struct flow_offload_tuple *tuple = arg->key;
+ const struct flow_offload_tuple_rhash *x = ptr;
+
+- if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
++ if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
+ return 1;
+
+ return 0;
diff --git a/target/linux/generic/pending-5.10/640-01-netfilter-flowtable-add-xmit-path-types.patch b/target/linux/generic/pending-5.10/640-01-netfilter-flowtable-add-xmit-path-types.patch
new file mode 100644
index 0000000000..e47d03e48d
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-01-netfilter-flowtable-add-xmit-path-types.patch
@@ -0,0 +1,188 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 20 Nov 2020 13:49:14 +0100
+Subject: [PATCH] netfilter: flowtable: add xmit path types
+
+Add the xmit_type field that defines the two supported xmit paths in the
+flowtable data plane, which are the neighbour and the xfrm xmit paths.
+This patch prepares for new flowtable xmit path types to come.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -89,6 +89,11 @@ enum flow_offload_tuple_dir {
+ FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
+ };
+
++enum flow_offload_xmit_type {
++ FLOW_OFFLOAD_XMIT_NEIGH = 0,
++ FLOW_OFFLOAD_XMIT_XFRM,
++};
++
+ struct flow_offload_tuple {
+ union {
+ struct in_addr src_v4;
+@@ -111,7 +116,8 @@ struct flow_offload_tuple {
+ /* All members above are keys for lookups, see flow_offload_hash(). */
+ struct { } __hash;
+
+- u8 dir;
++ u8 dir:6,
++ xmit_type:2;
+
+ u16 mtu;
+
+@@ -158,7 +164,8 @@ static inline __s32 nf_flow_timeout_delt
+
+ struct nf_flow_route {
+ struct {
+- struct dst_entry *dst;
++ struct dst_entry *dst;
++ enum flow_offload_xmit_type xmit_type;
+ } tuple[FLOW_OFFLOAD_DIR_MAX];
+ };
+
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -95,6 +95,7 @@ static int flow_offload_fill_route(struc
+ }
+
+ flow_tuple->iifidx = other_dst->dev->ifindex;
++ flow_tuple->xmit_type = route->tuple[dir].xmit_type;
+ flow_tuple->dst_cache = dst;
+
+ return 0;
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -220,10 +220,20 @@ static bool nf_flow_exceeds_mtu(const st
+ return true;
+ }
+
+-static int nf_flow_offload_dst_check(struct dst_entry *dst)
++static inline struct dst_entry *
++nft_flow_dst(struct flow_offload_tuple_rhash *tuplehash)
+ {
+- if (unlikely(dst_xfrm(dst)))
++ return tuplehash->tuple.dst_cache;
++}
++
++static int nf_flow_offload_dst_check(struct flow_offload_tuple_rhash *tuplehash)
++{
++ struct dst_entry *dst;
++
++ if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
++ dst = nft_flow_dst(tuplehash);
+ return dst_check(dst, 0) ? 0 : -1;
++ }
+
+ return 0;
+ }
+@@ -265,8 +275,6 @@ nf_flow_offload_ip_hook(void *priv, stru
+
+ dir = tuplehash->tuple.dir;
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+- rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+- outdev = rt->dst.dev;
+
+ if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
+ return NF_ACCEPT;
+@@ -280,7 +288,7 @@ nf_flow_offload_ip_hook(void *priv, stru
+
+ flow_offload_refresh(flow_table, flow);
+
+- if (nf_flow_offload_dst_check(&rt->dst)) {
++ if (nf_flow_offload_dst_check(tuplehash)) {
+ flow_offload_teardown(flow);
+ return NF_ACCEPT;
+ }
+@@ -295,13 +303,16 @@ nf_flow_offload_ip_hook(void *priv, stru
+ if (flow_table->flags & NF_FLOWTABLE_COUNTER)
+ nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
+
+- if (unlikely(dst_xfrm(&rt->dst))) {
++ rt = (struct rtable *)tuplehash->tuple.dst_cache;
++
++ if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+ memset(skb->cb, 0, sizeof(struct inet_skb_parm));
+ IPCB(skb)->iif = skb->dev->ifindex;
+ IPCB(skb)->flags = IPSKB_FORWARDED;
+ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+ }
+
++ outdev = rt->dst.dev;
+ skb->dev = outdev;
+ nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
+ skb_dst_set_noref(skb, &rt->dst);
+@@ -506,8 +517,6 @@ nf_flow_offload_ipv6_hook(void *priv, st
+
+ dir = tuplehash->tuple.dir;
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+- rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
+- outdev = rt->dst.dev;
+
+ if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
+ return NF_ACCEPT;
+@@ -518,7 +527,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
+
+ flow_offload_refresh(flow_table, flow);
+
+- if (nf_flow_offload_dst_check(&rt->dst)) {
++ if (nf_flow_offload_dst_check(tuplehash)) {
+ flow_offload_teardown(flow);
+ return NF_ACCEPT;
+ }
+@@ -536,13 +545,16 @@ nf_flow_offload_ipv6_hook(void *priv, st
+ if (flow_table->flags & NF_FLOWTABLE_COUNTER)
+ nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
+
+- if (unlikely(dst_xfrm(&rt->dst))) {
++ rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
++
++ if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ IP6CB(skb)->iif = skb->dev->ifindex;
+ IP6CB(skb)->flags = IP6SKB_FORWARDED;
+ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+ }
+
++ outdev = rt->dst.dev;
+ skb->dev = outdev;
+ nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+ skb_dst_set_noref(skb, &rt->dst);
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -19,6 +19,22 @@ struct nft_flow_offload {
+ struct nft_flowtable *flowtable;
+ };
+
++static enum flow_offload_xmit_type nft_xmit_type(struct dst_entry *dst)
++{
++ if (dst_xfrm(dst))
++ return FLOW_OFFLOAD_XMIT_XFRM;
++
++ return FLOW_OFFLOAD_XMIT_NEIGH;
++}
++
++static void nft_default_forward_path(struct nf_flow_route *route,
++ struct dst_entry *dst_cache,
++ enum ip_conntrack_dir dir)
++{
++ route->tuple[dir].dst = dst_cache;
++ route->tuple[dir].xmit_type = nft_xmit_type(dst_cache);
++}
++
+ static int nft_flow_route(const struct nft_pktinfo *pkt,
+ const struct nf_conn *ct,
+ struct nf_flow_route *route,
+@@ -44,8 +60,8 @@ static int nft_flow_route(const struct n
+ if (!other_dst)
+ return -ENOENT;
+
+- route->tuple[dir].dst = this_dst;
+- route->tuple[!dir].dst = other_dst;
++ nft_default_forward_path(route, this_dst, dir);
++ nft_default_forward_path(route, other_dst, !dir);
+
+ return 0;
+ }
diff --git a/target/linux/generic/pending-5.10/640-02-net-resolve-forwarding-path-from-virtual-netdevice-a.patch b/target/linux/generic/pending-5.10/640-02-net-resolve-forwarding-path-from-virtual-netdevice-a.patch
new file mode 100644
index 0000000000..953446baab
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-02-net-resolve-forwarding-path-from-virtual-netdevice-a.patch
@@ -0,0 +1,170 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Sun, 10 Jan 2021 15:53:58 +0100
+Subject: [PATCH] net: resolve forwarding path from virtual netdevice and
+ HW destination address
+
+This patch adds dev_fill_forward_path() which resolves the path to reach
+the real netdevice from the IP forwarding side. This function takes the
+netdevice and the destination hardware address as input and walks down
+the device stack, calling .ndo_fill_forward_path() for each device until
+the real device is found.
+
+For instance, assuming the following topology:
+
+ IP forwarding
+ / \
+ br0 eth0
+ / \
+ eth1 eth2
+ .
+ .
+ .
+ ethX
+ ab:cd:ef:ab:cd:ef
+
+where eth1 and eth2 are bridge ports and eth0 provides WAN connectivity.
+ethX is the interface in another box which is connected to the eth1
+bridge port.
+
+For packets going through IP forwarding to br0 whose destination MAC
+address is ab:cd:ef:ab:cd:ef, dev_fill_forward_path() provides the
+following path:
+
+ br0 -> eth1
+
+.ndo_fill_forward_path for br0 looks up the FDB entry for the destination
+MAC address to get the bridge port eth1.
+
+This information makes it possible to create a fast path that bypasses the
+classic bridge and IP forwarding paths, so packets go directly from the
+bridge port eth1 to eth0 (wan interface) and vice versa.
+
+ fast path
+ .------------------------.
+ / \
+ | IP forwarding |
+ | / \ \/
+ | br0 eth0
+ . / \
+ -> eth1 eth2
+ .
+ .
+ .
+ ethX
+ ab:cd:ef:ab:cd:ef
+
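+Caller's view (editor's sketch; br_dev and daddr are placeholder names):
+resolve the path for a destination MAC address and walk the resulting
+stack from the virtual device down to the real one:
+
+	struct net_device_path_stack stack;
+	int i;
+
+	if (dev_fill_forward_path(br_dev, daddr, &stack) < 0)
+		return;	/* no usable path, keep the slow path */
+
+	for (i = 0; i < stack.num_paths; i++)
+		pr_debug("hop %d: %s\n", i, stack.path[i].dev->name);
+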
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -833,6 +833,27 @@ typedef u16 (*select_queue_fallback_t)(s
+ struct sk_buff *skb,
+ struct net_device *sb_dev);
+
++enum net_device_path_type {
++ DEV_PATH_ETHERNET = 0,
++};
++
++struct net_device_path {
++ enum net_device_path_type type;
++ const struct net_device *dev;
++};
++
++#define NET_DEVICE_PATH_STACK_MAX 5
++
++struct net_device_path_stack {
++ int num_paths;
++ struct net_device_path path[NET_DEVICE_PATH_STACK_MAX];
++};
++
++struct net_device_path_ctx {
++ const struct net_device *dev;
++ const u8 *daddr;
++};
++
+ enum tc_setup_type {
+ TC_SETUP_QDISC_MQPRIO,
+ TC_SETUP_CLSU32,
+@@ -1279,6 +1300,8 @@ struct netdev_net_notifier {
+ * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
+ * If a device is paired with a peer device, return the peer instance.
+ * The caller must be under RCU read context.
++ * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
++ * Get the forwarding path to reach the real device from the HW destination address
+ */
+ struct net_device_ops {
+ int (*ndo_init)(struct net_device *dev);
+@@ -1487,6 +1510,8 @@ struct net_device_ops {
+ int (*ndo_tunnel_ctl)(struct net_device *dev,
+ struct ip_tunnel_parm *p, int cmd);
+ struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
++ int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
++ struct net_device_path *path);
+ };
+
+ /**
+@@ -2798,6 +2823,8 @@ void dev_remove_offload(struct packet_of
+
+ int dev_get_iflink(const struct net_device *dev);
+ int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
++int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
++ struct net_device_path_stack *stack);
+ struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
+ unsigned short mask);
+ struct net_device *dev_get_by_name(struct net *net, const char *name);
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -846,6 +846,52 @@ int dev_fill_metadata_dst(struct net_dev
+ }
+ EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
+
++static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
++{
++ int k = stack->num_paths++;
++
++ if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
++ return NULL;
++
++ return &stack->path[k];
++}
++
++int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
++ struct net_device_path_stack *stack)
++{
++ const struct net_device *last_dev;
++ struct net_device_path_ctx ctx = {
++ .dev = dev,
++ .daddr = daddr,
++ };
++ struct net_device_path *path;
++ int ret = 0;
++
++ stack->num_paths = 0;
++ while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
++ last_dev = ctx.dev;
++ path = dev_fwd_path(stack);
++ if (!path)
++ return -1;
++
++ memset(path, 0, sizeof(struct net_device_path));
++ ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
++ if (ret < 0)
++ return -1;
++
++ if (WARN_ON_ONCE(last_dev == ctx.dev))
++ return -1;
++ }
++ path = dev_fwd_path(stack);
++ if (!path)
++ return -1;
++ path->type = DEV_PATH_ETHERNET;
++ path->dev = ctx.dev;
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(dev_fill_forward_path);
++
+ /**
+ * __dev_get_by_name - find a device by its name
+ * @net: the applicable net namespace
diff --git a/target/linux/generic/pending-5.10/640-03-net-8021q-resolve-forwarding-path-for-vlan-devices.patch b/target/linux/generic/pending-5.10/640-03-net-8021q-resolve-forwarding-path-for-vlan-devices.patch
new file mode 100644
index 0000000000..ddc0de0d3b
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-03-net-8021q-resolve-forwarding-path-for-vlan-devices.patch
@@ -0,0 +1,80 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 20 Nov 2020 13:49:16 +0100
+Subject: [PATCH] net: 8021q: resolve forwarding path for vlan devices
+
+Add .ndo_fill_forward_path for vlan devices.
+
+For instance, assuming the following topology:
+
+ IP forwarding
+ / \
+ eth0.100 eth0
+ |
+ eth0
+ .
+ .
+ .
+ ethX
+ ab:cd:ef:ab:cd:ef
+
+For packets going through IP forwarding to eth0.100 whose destination
+MAC address is ab:cd:ef:ab:cd:ef, dev_fill_forward_path() provides the
+following path:
+
+ eth0.100 -> eth0
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -835,11 +835,18 @@ typedef u16 (*select_queue_fallback_t)(s
+
+ enum net_device_path_type {
+ DEV_PATH_ETHERNET = 0,
++ DEV_PATH_VLAN,
+ };
+
+ struct net_device_path {
+ enum net_device_path_type type;
+ const struct net_device *dev;
++ union {
++ struct {
++ u16 id;
++ __be16 proto;
++ } vlan;
++ };
+ };
+
+ #define NET_DEVICE_PATH_STACK_MAX 5
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -767,6 +767,20 @@ static int vlan_dev_get_iflink(const str
+ return real_dev->ifindex;
+ }
+
++static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx,
++ struct net_device_path *path)
++{
++ struct vlan_dev_priv *vlan = vlan_dev_priv(ctx->dev);
++
++ path->type = DEV_PATH_VLAN;
++ path->vlan.id = vlan->vlan_id;
++ path->vlan.proto = vlan->vlan_proto;
++ path->dev = ctx->dev;
++ ctx->dev = vlan->real_dev;
++
++ return 0;
++}
++
+ static const struct ethtool_ops vlan_ethtool_ops = {
+ .get_link_ksettings = vlan_ethtool_get_link_ksettings,
+ .get_drvinfo = vlan_ethtool_get_drvinfo,
+@@ -805,6 +819,7 @@ static const struct net_device_ops vlan_
+ #endif
+ .ndo_fix_features = vlan_dev_fix_features,
+ .ndo_get_iflink = vlan_dev_get_iflink,
++ .ndo_fill_forward_path = vlan_dev_fill_forward_path,
+ };
+
+ static void vlan_dev_free(struct net_device *dev)
diff --git a/target/linux/generic/pending-5.10/640-04-bridge-resolve-forwarding-path-for-bridge-devices.patch b/target/linux/generic/pending-5.10/640-04-bridge-resolve-forwarding-path-for-bridge-devices.patch
new file mode 100644
index 0000000000..d9093b3da4
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-04-bridge-resolve-forwarding-path-for-bridge-devices.patch
@@ -0,0 +1,62 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 20 Nov 2020 13:49:17 +0100
+Subject: [PATCH] bridge: resolve forwarding path for bridge devices
+
+Add .ndo_fill_forward_path for bridge devices.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -836,6 +836,7 @@ typedef u16 (*select_queue_fallback_t)(s
+ enum net_device_path_type {
+ DEV_PATH_ETHERNET = 0,
+ DEV_PATH_VLAN,
++ DEV_PATH_BRIDGE,
+ };
+
+ struct net_device_path {
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -398,6 +398,32 @@ static int br_del_slave(struct net_devic
+ return br_del_if(br, slave_dev);
+ }
+
++static int br_fill_forward_path(struct net_device_path_ctx *ctx,
++ struct net_device_path *path)
++{
++ struct net_bridge_fdb_entry *f;
++ struct net_bridge_port *dst;
++ struct net_bridge *br;
++
++ if (netif_is_bridge_port(ctx->dev))
++ return -1;
++
++ br = netdev_priv(ctx->dev);
++ f = br_fdb_find_rcu(br, ctx->daddr, 0);
++ if (!f || !f->dst)
++ return -1;
++
++ dst = READ_ONCE(f->dst);
++ if (!dst)
++ return -1;
++
++ path->type = DEV_PATH_BRIDGE;
++ path->dev = dst->br->dev;
++ ctx->dev = dst->dev;
++
++ return 0;
++}
++
+ static const struct ethtool_ops br_ethtool_ops = {
+ .get_drvinfo = br_getinfo,
+ .get_link = ethtool_op_get_link,
+@@ -432,6 +458,7 @@ static const struct net_device_ops br_ne
+ .ndo_bridge_setlink = br_setlink,
+ .ndo_bridge_dellink = br_dellink,
+ .ndo_features_check = passthru_features_check,
++ .ndo_fill_forward_path = br_fill_forward_path,
+ };
+
+ static struct device_type br_type = {
diff --git a/target/linux/generic/pending-5.10/640-05-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch b/target/linux/generic/pending-5.10/640-05-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch
new file mode 100644
index 0000000000..34724a5696
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-05-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch
@@ -0,0 +1,191 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 20 Nov 2020 13:49:18 +0100
+Subject: [PATCH] netfilter: flowtable: use dev_fill_forward_path() to
+ obtain ingress device
+
+The ingress device in the tuple is currently obtained from the route in
+the reply direction. Use dev_fill_forward_path() instead to get the real
+ingress device for this flow.
+
+Fall back to using the ingress device that the IP forwarding route
+provides if:
+
+- dev_fill_forward_path() finds no real ingress device.
+- the ingress device that is obtained is not part of the flowtable
+ devices.
+- this route has an xfrm policy.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -165,6 +165,9 @@ static inline __s32 nf_flow_timeout_delt
+ struct nf_flow_route {
+ struct {
+ struct dst_entry *dst;
++ struct {
++ u32 ifindex;
++ } in;
+ enum flow_offload_xmit_type xmit_type;
+ } tuple[FLOW_OFFLOAD_DIR_MAX];
+ };
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -79,7 +79,6 @@ static int flow_offload_fill_route(struc
+ enum flow_offload_tuple_dir dir)
+ {
+ struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+- struct dst_entry *other_dst = route->tuple[!dir].dst;
+ struct dst_entry *dst = route->tuple[dir].dst;
+
+ if (!dst_hold_safe(route->tuple[dir].dst))
+@@ -94,7 +93,7 @@ static int flow_offload_fill_route(struc
+ break;
+ }
+
+- flow_tuple->iifidx = other_dst->dev->ifindex;
++ flow_tuple->iifidx = route->tuple[dir].in.ifindex;
+ flow_tuple->xmit_type = route->tuple[dir].xmit_type;
+ flow_tuple->dst_cache = dst;
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -31,14 +31,104 @@ static void nft_default_forward_path(str
+ struct dst_entry *dst_cache,
+ enum ip_conntrack_dir dir)
+ {
++ route->tuple[!dir].in.ifindex = dst_cache->dev->ifindex;
+ route->tuple[dir].dst = dst_cache;
+ route->tuple[dir].xmit_type = nft_xmit_type(dst_cache);
+ }
+
++static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
++ const struct dst_entry *dst_cache,
++ const struct nf_conn *ct,
++ enum ip_conntrack_dir dir,
++ struct net_device_path_stack *stack)
++{
++ const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
++ struct net_device *dev = dst_cache->dev;
++ unsigned char ha[ETH_ALEN];
++ struct neighbour *n;
++ u8 nud_state;
++
++ n = dst_neigh_lookup(dst_cache, daddr);
++ if (!n)
++ return -1;
++
++ read_lock_bh(&n->lock);
++ nud_state = n->nud_state;
++ ether_addr_copy(ha, n->ha);
++ read_unlock_bh(&n->lock);
++ neigh_release(n);
++
++ if (!(nud_state & NUD_VALID))
++ return -1;
++
++ return dev_fill_forward_path(dev, ha, stack);
++}
++
++struct nft_forward_info {
++ const struct net_device *dev;
++};
++
++static void nft_dev_path_info(const struct net_device_path_stack *stack,
++ struct nft_forward_info *info)
++{
++ const struct net_device_path *path;
++ int i;
++
++ for (i = 0; i < stack->num_paths; i++) {
++ path = &stack->path[i];
++ switch (path->type) {
++ case DEV_PATH_ETHERNET:
++ info->dev = path->dev;
++ break;
++ case DEV_PATH_VLAN:
++ case DEV_PATH_BRIDGE:
++ default:
++ info->dev = NULL;
++ break;
++ }
++ }
++}
++
++static bool nft_flowtable_find_dev(const struct net_device *dev,
++ struct nft_flowtable *ft)
++{
++ struct nft_hook *hook;
++ bool found = false;
++
++ list_for_each_entry_rcu(hook, &ft->hook_list, list) {
++ if (hook->ops.dev != dev)
++ continue;
++
++ found = true;
++ break;
++ }
++
++ return found;
++}
++
++static void nft_dev_forward_path(struct nf_flow_route *route,
++ const struct nf_conn *ct,
++ enum ip_conntrack_dir dir,
++ struct nft_flowtable *ft)
++{
++ const struct dst_entry *dst = route->tuple[dir].dst;
++ struct net_device_path_stack stack;
++ struct nft_forward_info info = {};
++
++ if (nft_dev_fill_forward_path(route, dst, ct, dir, &stack) >= 0)
++ nft_dev_path_info(&stack, &info);
++
++ if (!info.dev || !nft_flowtable_find_dev(info.dev, ft))
++ return;
++
++ route->tuple[!dir].in.ifindex = info.dev->ifindex;
++}
++
+ static int nft_flow_route(const struct nft_pktinfo *pkt,
+ const struct nf_conn *ct,
+ struct nf_flow_route *route,
+- enum ip_conntrack_dir dir)
++ enum ip_conntrack_dir dir,
++ struct nft_flowtable *ft)
+ {
+ struct dst_entry *this_dst = skb_dst(pkt->skb);
+ struct dst_entry *other_dst = NULL;
+@@ -63,6 +153,12 @@ static int nft_flow_route(const struct n
+ nft_default_forward_path(route, this_dst, dir);
+ nft_default_forward_path(route, other_dst, !dir);
+
++ if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
++ route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
++ nft_dev_forward_path(route, ct, dir, ft);
++ nft_dev_forward_path(route, ct, !dir, ft);
++ }
++
+ return 0;
+ }
+
+@@ -90,8 +186,8 @@ static void nft_flow_offload_eval(const
+ struct nft_flow_offload *priv = nft_expr_priv(expr);
+ struct nf_flowtable *flowtable = &priv->flowtable->data;
+ struct tcphdr _tcph, *tcph = NULL;
++ struct nf_flow_route route = {};
+ enum ip_conntrack_info ctinfo;
+- struct nf_flow_route route;
+ struct flow_offload *flow;
+ enum ip_conntrack_dir dir;
+ struct nf_conn *ct;
+@@ -128,7 +224,7 @@ static void nft_flow_offload_eval(const
+ goto out;
+
+ dir = CTINFO2DIR(ctinfo);
+- if (nft_flow_route(pkt, ct, &route, dir) < 0)
++ if (nft_flow_route(pkt, ct, &route, dir, priv->flowtable) < 0)
+ goto err_flow_route;
+
+ flow = flow_offload_alloc(ct);
diff --git a/target/linux/generic/pending-5.10/640-06-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch b/target/linux/generic/pending-5.10/640-06-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch
new file mode 100644
index 0000000000..d4bf68a690
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-06-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch
@@ -0,0 +1,336 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 20 Nov 2020 13:49:19 +0100
+Subject: [PATCH] netfilter: flowtable: use dev_fill_forward_path() to
+ obtain egress device
+
+The egress device in the tuple is obtained from the route. Use
+dev_fill_forward_path() instead to provide the real egress device for
+this flow whenever it is available.
+
+The new FLOW_OFFLOAD_XMIT_DIRECT type uses dev_queue_xmit() to transmit
+ethernet frames. The source and destination hardware addresses are cached
+in the tuple so that the frames can be built and sent via dev_queue_xmit().
+
+The FLOW_OFFLOAD_XMIT_DIRECT replaces FLOW_OFFLOAD_XMIT_NEIGH if
+dev_fill_forward_path() finds a direct transmit path.
+
+In case of topology updates, if a peer is moved to a different bridge
+port, the connection will time out and a reconnect will result in a new
+entry with the correct path. Snooping fdb updates would allow stale
+flowtable entries to be cleaned up.
+
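+Transmit side of the direct path (editor's summary, mirroring the new
+nf_flow_queue_xmit() below): the cached addresses remove the neighbour
+lookup from the hot path entirely:
+
+	skb->dev = outdev;
+	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
+			tuplehash->tuple.out.h_source, skb->len);
+	dev_queue_xmit(skb);
+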
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -92,6 +92,7 @@ enum flow_offload_tuple_dir {
+ enum flow_offload_xmit_type {
+ FLOW_OFFLOAD_XMIT_NEIGH = 0,
+ FLOW_OFFLOAD_XMIT_XFRM,
++ FLOW_OFFLOAD_XMIT_DIRECT,
+ };
+
+ struct flow_offload_tuple {
+@@ -120,8 +121,14 @@ struct flow_offload_tuple {
+ xmit_type:2;
+
+ u16 mtu;
+-
+- struct dst_entry *dst_cache;
++ union {
++ struct dst_entry *dst_cache;
++ struct {
++ u32 ifidx;
++ u8 h_source[ETH_ALEN];
++ u8 h_dest[ETH_ALEN];
++ } out;
++ };
+ };
+
+ struct flow_offload_tuple_rhash {
+@@ -168,6 +175,11 @@ struct nf_flow_route {
+ struct {
+ u32 ifindex;
+ } in;
++ struct {
++ u32 ifindex;
++ u8 h_source[ETH_ALEN];
++ u8 h_dest[ETH_ALEN];
++ } out;
+ enum flow_offload_xmit_type xmit_type;
+ } tuple[FLOW_OFFLOAD_DIR_MAX];
+ };
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -81,9 +81,6 @@ static int flow_offload_fill_route(struc
+ struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+ struct dst_entry *dst = route->tuple[dir].dst;
+
+- if (!dst_hold_safe(route->tuple[dir].dst))
+- return -1;
+-
+ switch (flow_tuple->l3proto) {
+ case NFPROTO_IPV4:
+ flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
+@@ -94,12 +91,36 @@ static int flow_offload_fill_route(struc
+ }
+
+ flow_tuple->iifidx = route->tuple[dir].in.ifindex;
++
++ switch (route->tuple[dir].xmit_type) {
++ case FLOW_OFFLOAD_XMIT_DIRECT:
++ memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
++ ETH_ALEN);
++ memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
++ ETH_ALEN);
++ flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
++ break;
++ case FLOW_OFFLOAD_XMIT_XFRM:
++ case FLOW_OFFLOAD_XMIT_NEIGH:
++ if (!dst_hold_safe(route->tuple[dir].dst))
++ return -1;
++
++ flow_tuple->dst_cache = dst;
++ break;
++ }
+ flow_tuple->xmit_type = route->tuple[dir].xmit_type;
+- flow_tuple->dst_cache = dst;
+
+ return 0;
+ }
+
++static void nft_flow_dst_release(struct flow_offload *flow,
++ enum flow_offload_tuple_dir dir)
++{
++ if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
++ flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
++ dst_release(flow->tuplehash[dir].tuple.dst_cache);
++}
++
+ int flow_offload_route_init(struct flow_offload *flow,
+ const struct nf_flow_route *route)
+ {
+@@ -118,7 +139,7 @@ int flow_offload_route_init(struct flow_
+ return 0;
+
+ err_route_reply:
+- dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
++ nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
+
+ return err;
+ }
+@@ -169,8 +190,8 @@ static void flow_offload_fixup_ct(struct
+
+ static void flow_offload_route_release(struct flow_offload *flow)
+ {
+- dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
+- dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
++ nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
++ nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
+ }
+
+ void flow_offload_free(struct flow_offload *flow)
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -248,6 +248,24 @@ static unsigned int nf_flow_xmit_xfrm(st
+ return NF_STOLEN;
+ }
+
++static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
++ const struct flow_offload_tuple_rhash *tuplehash,
++ unsigned short type)
++{
++ struct net_device *outdev;
++
++ outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
++ if (!outdev)
++ return NF_DROP;
++
++ skb->dev = outdev;
++ dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
++ tuplehash->tuple.out.h_source, skb->len);
++ dev_queue_xmit(skb);
++
++ return NF_STOLEN;
++}
++
+ unsigned int
+ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+@@ -262,6 +280,7 @@ nf_flow_offload_ip_hook(void *priv, stru
+ unsigned int thoff;
+ struct iphdr *iph;
+ __be32 nexthop;
++ int ret;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return NF_ACCEPT;
+@@ -303,22 +322,32 @@ nf_flow_offload_ip_hook(void *priv, stru
+ if (flow_table->flags & NF_FLOWTABLE_COUNTER)
+ nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
+
+- rt = (struct rtable *)tuplehash->tuple.dst_cache;
+-
+ if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
++ rt = (struct rtable *)tuplehash->tuple.dst_cache;
+ memset(skb->cb, 0, sizeof(struct inet_skb_parm));
+ IPCB(skb)->iif = skb->dev->ifindex;
+ IPCB(skb)->flags = IPSKB_FORWARDED;
+ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+ }
+
+- outdev = rt->dst.dev;
+- skb->dev = outdev;
+- nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
+- skb_dst_set_noref(skb, &rt->dst);
+- neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
++ switch (tuplehash->tuple.xmit_type) {
++ case FLOW_OFFLOAD_XMIT_NEIGH:
++ rt = (struct rtable *)tuplehash->tuple.dst_cache;
++ outdev = rt->dst.dev;
++ skb->dev = outdev;
++ nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
++ skb_dst_set_noref(skb, &rt->dst);
++ neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
++ ret = NF_STOLEN;
++ break;
++ case FLOW_OFFLOAD_XMIT_DIRECT:
++ ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
++ if (ret == NF_DROP)
++ flow_offload_teardown(flow);
++ break;
++ }
+
+- return NF_STOLEN;
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
+
+@@ -504,6 +533,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
+ struct net_device *outdev;
+ struct ipv6hdr *ip6h;
+ struct rt6_info *rt;
++ int ret;
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ return NF_ACCEPT;
+@@ -545,21 +575,31 @@ nf_flow_offload_ipv6_hook(void *priv, st
+ if (flow_table->flags & NF_FLOWTABLE_COUNTER)
+ nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
+
+- rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
+-
+ if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
++ rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ IP6CB(skb)->iif = skb->dev->ifindex;
+ IP6CB(skb)->flags = IP6SKB_FORWARDED;
+ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+ }
+
+- outdev = rt->dst.dev;
+- skb->dev = outdev;
+- nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+- skb_dst_set_noref(skb, &rt->dst);
+- neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
++ switch (tuplehash->tuple.xmit_type) {
++ case FLOW_OFFLOAD_XMIT_NEIGH:
++ rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
++ outdev = rt->dst.dev;
++ skb->dev = outdev;
++ nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
++ skb_dst_set_noref(skb, &rt->dst);
++ neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
++ ret = NF_STOLEN;
++ break;
++ case FLOW_OFFLOAD_XMIT_DIRECT:
++ ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
++ if (ret == NF_DROP)
++ flow_offload_teardown(flow);
++ break;
++ }
+
+- return NF_STOLEN;
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -39,12 +39,11 @@ static void nft_default_forward_path(str
+ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
+ const struct dst_entry *dst_cache,
+ const struct nf_conn *ct,
+- enum ip_conntrack_dir dir,
++ enum ip_conntrack_dir dir, u8 *ha,
+ struct net_device_path_stack *stack)
+ {
+ const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
+ struct net_device *dev = dst_cache->dev;
+- unsigned char ha[ETH_ALEN];
+ struct neighbour *n;
+ u8 nud_state;
+
+@@ -66,22 +65,35 @@ static int nft_dev_fill_forward_path(con
+
+ struct nft_forward_info {
+ const struct net_device *dev;
++ u8 h_source[ETH_ALEN];
++ u8 h_dest[ETH_ALEN];
++ enum flow_offload_xmit_type xmit_type;
+ };
+
+ static void nft_dev_path_info(const struct net_device_path_stack *stack,
+- struct nft_forward_info *info)
++ struct nft_forward_info *info,
++ unsigned char *ha)
+ {
+ const struct net_device_path *path;
+ int i;
+
++ memcpy(info->h_dest, ha, ETH_ALEN);
++
+ for (i = 0; i < stack->num_paths; i++) {
+ path = &stack->path[i];
+ switch (path->type) {
+ case DEV_PATH_ETHERNET:
+ info->dev = path->dev;
++ if (is_zero_ether_addr(info->h_source))
++ memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
+ break;
+- case DEV_PATH_VLAN:
+ case DEV_PATH_BRIDGE:
++ if (is_zero_ether_addr(info->h_source))
++ memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
++
++ info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
++ break;
++ case DEV_PATH_VLAN:
+ default:
+ info->dev = NULL;
+ break;
+@@ -114,14 +126,22 @@ static void nft_dev_forward_path(struct
+ const struct dst_entry *dst = route->tuple[dir].dst;
+ struct net_device_path_stack stack;
+ struct nft_forward_info info = {};
++ unsigned char ha[ETH_ALEN];
+
+- if (nft_dev_fill_forward_path(route, dst, ct, dir, &stack) >= 0)
+- nft_dev_path_info(&stack, &info);
++ if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
++ nft_dev_path_info(&stack, &info, ha);
+
+ if (!info.dev || !nft_flowtable_find_dev(info.dev, ft))
+ return;
+
+ route->tuple[!dir].in.ifindex = info.dev->ifindex;
++
++ if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
++ memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
++ memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
++ route->tuple[dir].out.ifindex = info.dev->ifindex;
++ route->tuple[dir].xmit_type = info.xmit_type;
++ }
+ }
+
+ static int nft_flow_route(const struct nft_pktinfo *pkt,
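To make the new control flow concrete, here is a minimal stand-alone C model of the verdict selection the hunks above introduce: FLOW_OFFLOAD_XMIT_NEIGH keeps the classic dst/neighbour transmit, while FLOW_OFFLOAD_XMIT_DIRECT queues the skb with the precomputed Ethernet header. Every type, field, and helper below is an illustrative stand-in, not a kernel symbol.

/* Stand-alone sketch of the xmit_type dispatch; illustrative names only. */
#include <stdio.h>

enum xmit_type { XMIT_NEIGH, XMIT_DIRECT };
enum verdict { VERDICT_STOLEN, VERDICT_DROP };

struct tuple {
    enum xmit_type xmit_type;
    int out_ifindex;        /* only meaningful for XMIT_DIRECT */
};

/* stands in for the dst/neighbour transmit path (neigh_xmit) */
static enum verdict neigh_path(const struct tuple *t)
{
    (void)t;
    printf("neigh xmit (classic routed path)\n");
    return VERDICT_STOLEN;
}

/* stands in for nf_flow_queue_xmit() with the precomputed header */
static enum verdict direct_path(const struct tuple *t)
{
    printf("direct xmit via ifindex %d\n", t->out_ifindex);
    return VERDICT_STOLEN;
}

static enum verdict hook_verdict(const struct tuple *t)
{
    switch (t->xmit_type) {
    case XMIT_NEIGH:
        return neigh_path(t);
    case XMIT_DIRECT:
        /* in the kernel, NF_DROP here also tears the flow down */
        return direct_path(t);
    }
    return VERDICT_DROP;
}

int main(void)
{
    struct tuple routed = { XMIT_NEIGH, 0 };
    struct tuple direct = { XMIT_DIRECT, 4 };

    hook_verdict(&routed);
    hook_verdict(&direct);
    return 0;
}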
diff --git a/target/linux/generic/pending-5.10/640-07-netfilter-flowtable-add-vlan-support.patch b/target/linux/generic/pending-5.10/640-07-netfilter-flowtable-add-vlan-support.patch
new file mode 100644
index 0000000000..77aeaccbce
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-07-netfilter-flowtable-add-vlan-support.patch
@@ -0,0 +1,364 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 20 Nov 2020 13:49:20 +0100
+Subject: [PATCH] netfilter: flowtable: add vlan support
+
+Add the vlan id and protocol to the flow tuple to uniquely identify
+flows from the receive path. For the transmit path, dev_hard_header()
+on the vlan device pushes the headers. This patch includes support for
+two VLAN headers (QinQ) on the ingress path.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -95,6 +95,8 @@ enum flow_offload_xmit_type {
+ FLOW_OFFLOAD_XMIT_DIRECT,
+ };
+
++#define NF_FLOW_TABLE_VLAN_MAX 2
++
+ struct flow_offload_tuple {
+ union {
+ struct in_addr src_v4;
+@@ -113,13 +115,17 @@ struct flow_offload_tuple {
+
+ u8 l3proto;
+ u8 l4proto;
++ struct {
++ u16 id;
++ __be16 proto;
++ } in_vlan[NF_FLOW_TABLE_VLAN_MAX];
+
+ /* All members above are keys for lookups, see flow_offload_hash(). */
+ struct { } __hash;
+
+- u8 dir:6,
+- xmit_type:2;
+-
++ u8 dir:4,
++ xmit_type:2,
++ in_vlan_num:2;
+ u16 mtu;
+ union {
+ struct dst_entry *dst_cache;
+@@ -174,6 +180,9 @@ struct nf_flow_route {
+ struct dst_entry *dst;
+ struct {
+ u32 ifindex;
++ u16 vid[NF_FLOW_TABLE_VLAN_MAX];
++ __be16 vproto[NF_FLOW_TABLE_VLAN_MAX];
++ u8 num_vlans;
+ } in;
+ struct {
+ u32 ifindex;
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -80,6 +80,7 @@ static int flow_offload_fill_route(struc
+ {
+ struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+ struct dst_entry *dst = route->tuple[dir].dst;
++ int i, j = 0;
+
+ switch (flow_tuple->l3proto) {
+ case NFPROTO_IPV4:
+@@ -91,6 +92,12 @@ static int flow_offload_fill_route(struc
+ }
+
+ flow_tuple->iifidx = route->tuple[dir].in.ifindex;
++ for (i = route->tuple[dir].in.num_vlans - 1; i >= 0; i--) {
++ flow_tuple->in_vlan[j].id = route->tuple[dir].in.vid[i];
++ flow_tuple->in_vlan[j].proto = route->tuple[dir].in.vproto[i];
++ j++;
++ }
++ flow_tuple->in_vlan_num = route->tuple[dir].in.num_vlans;
+
+ switch (route->tuple[dir].xmit_type) {
+ case FLOW_OFFLOAD_XMIT_DIRECT:
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -159,17 +159,35 @@ static bool ip_has_options(unsigned int
+ return thoff != sizeof(struct iphdr);
+ }
+
++static void nf_flow_tuple_vlan(struct sk_buff *skb,
++ struct flow_offload_tuple *tuple)
++{
++ if (skb_vlan_tag_present(skb)) {
++ tuple->in_vlan[0].id = skb_vlan_tag_get(skb);
++ tuple->in_vlan[0].proto = skb->vlan_proto;
++ }
++ if (skb->protocol == htons(ETH_P_8021Q)) {
++ struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
++
++ tuple->in_vlan[1].id = ntohs(veth->h_vlan_TCI);
++ tuple->in_vlan[1].proto = skb->protocol;
++ }
++}
++
+ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
+ struct flow_offload_tuple *tuple)
+ {
+- unsigned int thoff, hdrsize;
++ unsigned int thoff, hdrsize, offset = 0;
+ struct flow_ports *ports;
+ struct iphdr *iph;
+
+- if (!pskb_may_pull(skb, sizeof(*iph)))
++ if (skb->protocol == htons(ETH_P_8021Q))
++ offset += VLAN_HLEN;
++
++ if (!pskb_may_pull(skb, sizeof(*iph) + offset))
+ return -1;
+
+- iph = ip_hdr(skb);
++ iph = (struct iphdr *)(skb_network_header(skb) + offset);
+ thoff = iph->ihl * 4;
+
+ if (ip_is_fragment(iph) ||
+@@ -191,11 +209,11 @@ static int nf_flow_tuple_ip(struct sk_bu
+ return -1;
+
+ thoff = iph->ihl * 4;
+- if (!pskb_may_pull(skb, thoff + hdrsize))
++ if (!pskb_may_pull(skb, thoff + hdrsize + offset))
+ return -1;
+
+- iph = ip_hdr(skb);
+- ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
++ iph = (struct iphdr *)(skb_network_header(skb) + offset);
++ ports = (struct flow_ports *)(skb_network_header(skb) + thoff + offset);
+
+ tuple->src_v4.s_addr = iph->saddr;
+ tuple->dst_v4.s_addr = iph->daddr;
+@@ -204,6 +222,7 @@ static int nf_flow_tuple_ip(struct sk_bu
+ tuple->l3proto = AF_INET;
+ tuple->l4proto = iph->protocol;
+ tuple->iifidx = dev->ifindex;
++ nf_flow_tuple_vlan(skb, tuple);
+
+ return 0;
+ }
+@@ -248,6 +267,37 @@ static unsigned int nf_flow_xmit_xfrm(st
+ return NF_STOLEN;
+ }
+
++static bool nf_flow_skb_vlan_protocol(const struct sk_buff *skb, __be16 proto)
++{
++ if (skb->protocol == htons(ETH_P_8021Q)) {
++ struct vlan_ethhdr *veth;
++
++ veth = (struct vlan_ethhdr *)skb_mac_header(skb);
++ if (veth->h_vlan_encapsulated_proto == proto)
++ return true;
++ }
++
++ return false;
++}
++
++static void nf_flow_vlan_pop(struct sk_buff *skb,
++ struct flow_offload_tuple_rhash *tuplehash)
++{
++ struct vlan_hdr *vlan_hdr;
++ int i;
++
++ for (i = 0; i < tuplehash->tuple.in_vlan_num; i++) {
++ if (skb_vlan_tag_present(skb)) {
++ __vlan_hwaccel_clear_tag(skb);
++ continue;
++ }
++ vlan_hdr = (struct vlan_hdr *)skb->data;
++ __skb_pull(skb, VLAN_HLEN);
++ vlan_set_encap_proto(skb, vlan_hdr);
++ skb_reset_network_header(skb);
++ }
++}
++
+ static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
+ const struct flow_offload_tuple_rhash *tuplehash,
+ unsigned short type)
+@@ -280,9 +330,11 @@ nf_flow_offload_ip_hook(void *priv, stru
+ unsigned int thoff;
+ struct iphdr *iph;
+ __be32 nexthop;
++ u32 offset = 0;
+ int ret;
+
+- if (skb->protocol != htons(ETH_P_IP))
++ if (skb->protocol != htons(ETH_P_IP) &&
++ !nf_flow_skb_vlan_protocol(skb, htons(ETH_P_IP)))
+ return NF_ACCEPT;
+
+ if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
+@@ -298,11 +350,15 @@ nf_flow_offload_ip_hook(void *priv, stru
+ if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
+ return NF_ACCEPT;
+
+- if (skb_try_make_writable(skb, sizeof(*iph)))
++ if (skb->protocol == htons(ETH_P_8021Q))
++ offset += VLAN_HLEN;
++
++ if (skb_try_make_writable(skb, sizeof(*iph) + offset))
+ return NF_DROP;
+
+- thoff = ip_hdr(skb)->ihl * 4;
+- if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
++ iph = (struct iphdr *)(skb_network_header(skb) + offset);
++ thoff = (iph->ihl * 4) + offset;
++ if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
+ return NF_ACCEPT;
+
+ flow_offload_refresh(flow_table, flow);
+@@ -312,6 +368,9 @@ nf_flow_offload_ip_hook(void *priv, stru
+ return NF_ACCEPT;
+ }
+
++ nf_flow_vlan_pop(skb, tuplehash);
++ thoff -= offset;
++
+ if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
+ return NF_DROP;
+
+@@ -479,14 +538,17 @@ static int nf_flow_nat_ipv6(const struct
+ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
+ struct flow_offload_tuple *tuple)
+ {
+- unsigned int thoff, hdrsize;
++ unsigned int thoff, hdrsize, offset = 0;
+ struct flow_ports *ports;
+ struct ipv6hdr *ip6h;
+
+- if (!pskb_may_pull(skb, sizeof(*ip6h)))
++ if (skb->protocol == htons(ETH_P_8021Q))
++ offset += VLAN_HLEN;
++
++ if (!pskb_may_pull(skb, sizeof(*ip6h) + offset))
+ return -1;
+
+- ip6h = ipv6_hdr(skb);
++ ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
+
+ switch (ip6h->nexthdr) {
+ case IPPROTO_TCP:
+@@ -503,11 +565,11 @@ static int nf_flow_tuple_ipv6(struct sk_
+ return -1;
+
+ thoff = sizeof(*ip6h);
+- if (!pskb_may_pull(skb, thoff + hdrsize))
++ if (!pskb_may_pull(skb, thoff + hdrsize + offset))
+ return -1;
+
+- ip6h = ipv6_hdr(skb);
+- ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
++ ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
++ ports = (struct flow_ports *)(skb_network_header(skb) + thoff + offset);
+
+ tuple->src_v6 = ip6h->saddr;
+ tuple->dst_v6 = ip6h->daddr;
+@@ -516,6 +578,7 @@ static int nf_flow_tuple_ipv6(struct sk_
+ tuple->l3proto = AF_INET6;
+ tuple->l4proto = ip6h->nexthdr;
+ tuple->iifidx = dev->ifindex;
++ nf_flow_tuple_vlan(skb, tuple);
+
+ return 0;
+ }
+@@ -533,9 +596,11 @@ nf_flow_offload_ipv6_hook(void *priv, st
+ struct net_device *outdev;
+ struct ipv6hdr *ip6h;
+ struct rt6_info *rt;
++ u32 offset = 0;
+ int ret;
+
+- if (skb->protocol != htons(ETH_P_IPV6))
++ if (skb->protocol != htons(ETH_P_IPV6) &&
++ !nf_flow_skb_vlan_protocol(skb, htons(ETH_P_IPV6)))
+ return NF_ACCEPT;
+
+ if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
+@@ -551,8 +616,11 @@ nf_flow_offload_ipv6_hook(void *priv, st
+ if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
+ return NF_ACCEPT;
+
+- if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
+- sizeof(*ip6h)))
++ if (skb->protocol == htons(ETH_P_8021Q))
++ offset += VLAN_HLEN;
++
++ ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
++ if (nf_flow_state_check(flow, ip6h->nexthdr, skb, sizeof(*ip6h)))
+ return NF_ACCEPT;
+
+ flow_offload_refresh(flow_table, flow);
+@@ -562,6 +630,8 @@ nf_flow_offload_ipv6_hook(void *priv, st
+ return NF_ACCEPT;
+ }
+
++ nf_flow_vlan_pop(skb, tuplehash);
++
+ if (skb_try_make_writable(skb, sizeof(*ip6h)))
+ return NF_DROP;
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -65,6 +65,9 @@ static int nft_dev_fill_forward_path(con
+
+ struct nft_forward_info {
+ const struct net_device *dev;
++ __u16 vid[NF_FLOW_TABLE_VLAN_MAX];
++ __be16 vproto[NF_FLOW_TABLE_VLAN_MAX];
++ u8 num_vlans;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
+ enum flow_offload_xmit_type xmit_type;
+@@ -83,9 +86,22 @@ static void nft_dev_path_info(const stru
+ path = &stack->path[i];
+ switch (path->type) {
+ case DEV_PATH_ETHERNET:
++ case DEV_PATH_VLAN:
+ info->dev = path->dev;
+ if (is_zero_ether_addr(info->h_source))
+ memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
++
++ if (path->type == DEV_PATH_ETHERNET)
++ break;
++
++ /* DEV_PATH_VLAN */
++ if (info->num_vlans >= NF_FLOW_TABLE_VLAN_MAX) {
++ info->dev = NULL;
++ break;
++ }
++ info->vid[info->num_vlans] = path->vlan.id;
++ info->vproto[info->num_vlans] = path->vlan.proto;
++ info->num_vlans++;
+ break;
+ case DEV_PATH_BRIDGE:
+ if (is_zero_ether_addr(info->h_source))
+@@ -93,7 +109,6 @@ static void nft_dev_path_info(const stru
+
+ info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
+ break;
+- case DEV_PATH_VLAN:
+ default:
+ info->dev = NULL;
+ break;
+@@ -127,6 +142,7 @@ static void nft_dev_forward_path(struct
+ struct net_device_path_stack stack;
+ struct nft_forward_info info = {};
+ unsigned char ha[ETH_ALEN];
++ int i;
+
+ if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
+ nft_dev_path_info(&stack, &info, ha);
+@@ -135,6 +151,11 @@ static void nft_dev_forward_path(struct
+ return;
+
+ route->tuple[!dir].in.ifindex = info.dev->ifindex;
++ for (i = 0; i < info.num_vlans; i++) {
++ route->tuple[!dir].in.vid[i] = info.vid[i];
++ route->tuple[!dir].in.vproto[i] = info.vproto[i];
++ }
++ route->tuple[!dir].in.num_vlans = info.num_vlans;
+
+ if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
+ memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
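The key subtlety in flow_offload_fill_route() above is the reversed copy of the VLAN list: the route side collects tags while walking down from the upper device, while the lookup key must match tags in on-wire order (hardware-accelerated outermost tag first). A small stand-alone C model of that reversal follows, assuming the route list is built innermost-first; the names are illustrative stand-ins.

/* Model of reversing the route's VLAN list into the tuple key. */
#include <stdio.h>

#define VLAN_MAX 2

struct vlan { unsigned short id, proto; };

/* route order assumed innermost-first; tuple order is as seen on
 * ingress, outermost tag first, so iterate the route list backwards */
static void fill_tuple_vlans(const struct vlan *route_in, int num,
                             struct vlan *tuple_in)
{
    int i, j = 0;

    for (i = num - 1; i >= 0; i--)
        tuple_in[j++] = route_in[i];
}

int main(void)
{
    struct vlan route[VLAN_MAX] = { { 10, 0x8100 }, { 20, 0x8100 } };
    struct vlan tuple[VLAN_MAX] = { { 0, 0 } };
    int i;

    fill_tuple_vlans(route, 2, tuple);
    for (i = 0; i < VLAN_MAX; i++)
        printf("tuple vlan[%d]: id %u\n", i, tuple[i].id);
    return 0;
}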
diff --git a/target/linux/generic/pending-5.10/640-08-selftests-netfilter-flowtable-bridge-and-VLAN-suppor.patch b/target/linux/generic/pending-5.10/640-08-selftests-netfilter-flowtable-bridge-and-VLAN-suppor.patch
new file mode 100644
index 0000000000..1ee015c6aa
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-08-selftests-netfilter-flowtable-bridge-and-VLAN-suppor.patch
@@ -0,0 +1,107 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 20 Nov 2020 13:49:21 +0100
+Subject: [PATCH] selftests: netfilter: flowtable bridge and VLAN support
+
+This patch adds two new tests to cover bridge and VLAN support:
+
+- Add a bridge device to the Router1 (nsr1) container and attach the
+  veth0 device to the bridge. Assign the IP address to the bridge device
+  to exercise the bridge forwarding path.
+
+- Add VLAN encapsulation between the bridge device in Router1 and one
+  of the sender containers (ns1).
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
++++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
+@@ -371,6 +371,88 @@ else
+ ip netns exec nsr1 nft list ruleset
+ fi
+
++# Another test:
++# Add bridge interface br0 to Router1, with NAT enabled.
++ip -net nsr1 link add name br0 type bridge
++ip -net nsr1 addr flush dev veth0
++ip -net nsr1 link set up dev veth0
++ip -net nsr1 link set veth0 master br0
++ip -net nsr1 addr add 10.0.1.1/24 dev br0
++ip -net nsr1 addr add dead:1::1/64 dev br0
++ip -net nsr1 link set up dev br0
++
++ip netns exec nsr1 sysctl net.ipv4.conf.br0.forwarding=1 > /dev/null
++
++# br0 with NAT enabled.
++ip netns exec nsr1 nft -f - <<EOF
++flush table ip nat
++table ip nat {
++ chain prerouting {
++ type nat hook prerouting priority 0; policy accept;
++ meta iif "br0" ip daddr 10.6.6.6 tcp dport 1666 counter dnat ip to 10.0.2.99:12345
++ }
++
++ chain postrouting {
++ type nat hook postrouting priority 0; policy accept;
++ meta oifname "veth1" counter masquerade
++ }
++}
++EOF
++
++if test_tcp_forwarding_nat ns1 ns2; then
++ echo "PASS: flow offloaded for ns1/ns2 with bridge NAT"
++else
++ echo "FAIL: flow offload for ns1/ns2 with bridge NAT" 1>&2
++ ip netns exec nsr1 nft list ruleset
++ ret=1
++fi
++
++# Another test:
++# Add bridge interface br0 to Router1, with NAT and VLAN.
++ip -net nsr1 link set veth0 nomaster
++ip -net nsr1 link set down dev veth0
++ip -net nsr1 link add link veth0 name veth0.10 type vlan id 10
++ip -net nsr1 link set up dev veth0
++ip -net nsr1 link set up dev veth0.10
++ip -net nsr1 link set veth0.10 master br0
++
++ip -net ns1 addr flush dev eth0
++ip -net ns1 link add link eth0 name eth0.10 type vlan id 10
++ip -net ns1 link set eth0 up
++ip -net ns1 link set eth0.10 up
++ip -net ns1 addr add 10.0.1.99/24 dev eth0.10
++ip -net ns1 route add default via 10.0.1.1
++ip -net ns1 addr add dead:1::99/64 dev eth0.10
++
++if test_tcp_forwarding_nat ns1 ns2; then
++ echo "PASS: flow offloaded for ns1/ns2 with bridge NAT and VLAN"
++else
++ echo "FAIL: flow offload for ns1/ns2 with bridge NAT and VLAN" 1>&2
++ ip netns exec nsr1 nft list ruleset
++ ret=1
++fi
++
++# restore test topology (remove bridge and VLAN)
++ip -net nsr1 link set veth0 nomaster
++ip -net nsr1 link set veth0 down
++ip -net nsr1 link set veth0.10 down
++ip -net nsr1 link delete veth0.10 type vlan
++ip -net nsr1 link delete br0 type bridge
++ip -net ns1 addr flush dev eth0.10
++ip -net ns1 link set eth0.10 down
++ip -net ns1 link set eth0 down
++ip -net ns1 link delete eth0.10 type vlan
++
++# restore address in ns1 and nsr1
++ip -net ns1 link set eth0 up
++ip -net ns1 addr add 10.0.1.99/24 dev eth0
++ip -net ns1 route add default via 10.0.1.1
++ip -net ns1 addr add dead:1::99/64 dev eth0
++ip -net ns1 route add default via dead:1::1
++ip -net nsr1 addr add 10.0.1.1/24 dev veth0
++ip -net nsr1 addr add dead:1::1/64 dev veth0
++ip -net nsr1 link set up dev veth0
++
+ KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1)
+ KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1)
+ SPI1=$RANDOM
diff --git a/target/linux/generic/pending-5.10/640-09-net-bridge-resolve-VLAN-tag-actions-in-forwarding-pa.patch b/target/linux/generic/pending-5.10/640-09-net-bridge-resolve-VLAN-tag-actions-in-forwarding-pa.patch
new file mode 100644
index 0000000000..fb5f736648
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-09-net-bridge-resolve-VLAN-tag-actions-in-forwarding-pa.patch
@@ -0,0 +1,207 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Mon, 7 Dec 2020 20:31:45 +0100
+Subject: [PATCH] net: bridge: resolve VLAN tag actions in forwarding
+ path for bridge devices
+
+Depending on the VLAN settings of the bridge and the port, the bridge can
+either add or remove a tag. When vlan filtering is enabled, the fdb lookup
+also needs to know the VLAN tag/proto for the destination address.
+To provide this, keep track of the stack of VLAN tags for the path in the
+lookup context.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -847,10 +847,20 @@ struct net_device_path {
+ u16 id;
+ __be16 proto;
+ } vlan;
++ struct {
++ enum {
++ DEV_PATH_BR_VLAN_KEEP,
++ DEV_PATH_BR_VLAN_TAG,
++ DEV_PATH_BR_VLAN_UNTAG,
++ } vlan_mode;
++ u16 vlan_id;
++ __be16 vlan_proto;
++ } bridge;
+ };
+ };
+
+ #define NET_DEVICE_PATH_STACK_MAX 5
++#define NET_DEVICE_PATH_VLAN_MAX 2
+
+ struct net_device_path_stack {
+ int num_paths;
+@@ -860,6 +870,12 @@ struct net_device_path_stack {
+ struct net_device_path_ctx {
+ const struct net_device *dev;
+ const u8 *daddr;
++
++ int num_vlans;
++ struct {
++ u16 id;
++ __be16 proto;
++ } vlan[NET_DEVICE_PATH_VLAN_MAX];
+ };
+
+ enum tc_setup_type {
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -777,6 +777,12 @@ static int vlan_dev_fill_forward_path(st
+ path->vlan.proto = vlan->vlan_proto;
+ path->dev = ctx->dev;
+ ctx->dev = vlan->real_dev;
++ if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
++ return -ENOSPC;
++
++ ctx->vlan[ctx->num_vlans].id = vlan->vlan_id;
++ ctx->vlan[ctx->num_vlans].proto = vlan->vlan_proto;
++ ctx->num_vlans++;
+
+ return 0;
+ }
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -409,7 +409,10 @@ static int br_fill_forward_path(struct n
+ return -1;
+
+ br = netdev_priv(ctx->dev);
+- f = br_fdb_find_rcu(br, ctx->daddr, 0);
++
++ br_vlan_fill_forward_path_pvid(br, ctx, path);
++
++ f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id);
+ if (!f || !f->dst)
+ return -1;
+
+@@ -417,10 +420,28 @@ static int br_fill_forward_path(struct n
+ if (!dst)
+ return -1;
+
++ if (br_vlan_fill_forward_path_mode(br, dst, path))
++ return -1;
++
+ path->type = DEV_PATH_BRIDGE;
+ path->dev = dst->br->dev;
+ ctx->dev = dst->dev;
+
++ switch (path->bridge.vlan_mode) {
++ case DEV_PATH_BR_VLAN_TAG:
++ if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
++ return -ENOSPC;
++ ctx->vlan[ctx->num_vlans].id = path->bridge.vlan_id;
++ ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto;
++ ctx->num_vlans++;
++ break;
++ case DEV_PATH_BR_VLAN_UNTAG:
++ ctx->num_vlans--;
++ break;
++ case DEV_PATH_BR_VLAN_KEEP:
++ break;
++ }
++
+ return 0;
+ }
+
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -1095,6 +1095,13 @@ void br_vlan_notify(const struct net_bri
+ bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
+ const struct net_bridge_vlan *range_end);
+
++void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
++ struct net_device_path_ctx *ctx,
++ struct net_device_path *path);
++int br_vlan_fill_forward_path_mode(struct net_bridge *br,
++ struct net_bridge_port *dst,
++ struct net_device_path *path);
++
+ static inline struct net_bridge_vlan_group *br_vlan_group(
+ const struct net_bridge *br)
+ {
+@@ -1252,6 +1259,19 @@ static inline int nbp_get_num_vlan_infos
+ {
+ return 0;
+ }
++
++static inline void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
++ struct net_device_path_ctx *ctx,
++ struct net_device_path *path)
++{
++}
++
++static inline int br_vlan_fill_forward_path_mode(struct net_bridge *br,
++ struct net_bridge_port *dst,
++ struct net_device_path *path)
++{
++ return 0;
++}
+
+ static inline struct net_bridge_vlan_group *br_vlan_group(
+ const struct net_bridge *br)
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -1327,6 +1327,59 @@ int br_vlan_get_pvid_rcu(const struct ne
+ }
+ EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
+
++void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
++ struct net_device_path_ctx *ctx,
++ struct net_device_path *path)
++{
++ struct net_bridge_vlan_group *vg;
++ int idx = ctx->num_vlans - 1;
++ u16 vid;
++
++ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
++
++ if (!br_opt_get(br, BROPT_VLAN_ENABLED))
++ return;
++
++ vg = br_vlan_group(br);
++
++ if (idx >= 0 &&
++ ctx->vlan[idx].proto == br->vlan_proto) {
++ vid = ctx->vlan[idx].id;
++ } else {
++ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
++ vid = br_get_pvid(vg);
++ }
++
++ path->bridge.vlan_id = vid;
++ path->bridge.vlan_proto = br->vlan_proto;
++}
++
++int br_vlan_fill_forward_path_mode(struct net_bridge *br,
++ struct net_bridge_port *dst,
++ struct net_device_path *path)
++{
++ struct net_bridge_vlan_group *vg;
++ struct net_bridge_vlan *v;
++
++ if (!br_opt_get(br, BROPT_VLAN_ENABLED))
++ return 0;
++
++ vg = nbp_vlan_group_rcu(dst);
++ v = br_vlan_find(vg, path->bridge.vlan_id);
++ if (!v || !br_vlan_should_use(v))
++ return -EINVAL;
++
++ if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
++ return 0;
++
++ if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
++ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
++ else
++ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;
++
++ return 0;
++}
++
+ int br_vlan_get_info(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo)
+ {
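The two helpers added to br_vlan.c above implement a small decision table: on the ingress side, a tag already carried in the bridge's VLAN protocol is kept, otherwise the PVID would be tagged; on the egress side, untagged member ports strip the tag again. A stand-alone C sketch of that resolution follows; all names are illustrative stand-ins for the kernel structures.

/* Sketch of the KEEP/TAG/UNTAG resolution in br_vlan_fill_forward_path_*. */
#include <stdio.h>
#include <stdbool.h>

enum vlan_mode { BR_VLAN_KEEP, BR_VLAN_TAG, BR_VLAN_UNTAG };

struct ctx { int num_vlans; unsigned short top_proto, top_id; };

/* ingress: does the packet already carry a tag in the bridge's proto? */
static enum vlan_mode resolve_pvid(const struct ctx *c,
                                   unsigned short br_proto,
                                   unsigned short pvid,
                                   unsigned short *vid)
{
    if (c->num_vlans > 0 && c->top_proto == br_proto) {
        *vid = c->top_id;       /* keep the existing tag */
        return BR_VLAN_KEEP;
    }
    *vid = pvid;                /* bridge would tag with the PVID */
    return BR_VLAN_TAG;
}

/* egress: an untagged member port strips the tag again */
static enum vlan_mode resolve_egress(enum vlan_mode mode,
                                     bool untagged_member)
{
    if (!untagged_member)
        return mode;
    return (mode == BR_VLAN_TAG) ? BR_VLAN_KEEP : BR_VLAN_UNTAG;
}

int main(void)
{
    struct ctx c = { 0, 0, 0 };
    unsigned short vid;
    enum vlan_mode m = resolve_pvid(&c, 0x8100, 1, &vid);

    printf("ingress mode %d vid %u -> egress mode %d\n",
           m, vid, resolve_egress(m, true));
    return 0;
}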
diff --git a/target/linux/generic/pending-5.10/640-10-net-dsa-resolve-forwarding-path-for-dsa-slave-ports.patch b/target/linux/generic/pending-5.10/640-10-net-dsa-resolve-forwarding-path-for-dsa-slave-ports.patch
new file mode 100644
index 0000000000..64a2b14af5
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-10-net-dsa-resolve-forwarding-path-for-dsa-slave-ports.patch
@@ -0,0 +1,62 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Mon, 7 Dec 2020 20:31:48 +0100
+Subject: [PATCH] net: dsa: resolve forwarding path for dsa slave ports
+
+Add .ndo_fill_forward_path for dsa slave port devices
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -837,6 +837,7 @@ enum net_device_path_type {
+ DEV_PATH_ETHERNET = 0,
+ DEV_PATH_VLAN,
+ DEV_PATH_BRIDGE,
++ DEV_PATH_DSA,
+ };
+
+ struct net_device_path {
+@@ -856,6 +857,10 @@ struct net_device_path {
+ u16 vlan_id;
+ __be16 vlan_proto;
+ } bridge;
++ struct {
++ int port;
++ u16 proto;
++ } dsa;
+ };
+ };
+
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1582,6 +1582,21 @@ static struct devlink_port *dsa_slave_ge
+ return dp->ds->devlink ? &dp->devlink_port : NULL;
+ }
+
++static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
++ struct net_device_path *path)
++{
++ struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
++ struct dsa_port *cpu_dp = dp->cpu_dp;
++
++ path->dev = ctx->dev;
++ path->type = DEV_PATH_DSA;
++ path->dsa.proto = cpu_dp->tag_ops->proto;
++ path->dsa.port = dp->index;
++ ctx->dev = cpu_dp->master;
++
++ return 0;
++}
++
+ static const struct net_device_ops dsa_slave_netdev_ops = {
+ .ndo_open = dsa_slave_open,
+ .ndo_stop = dsa_slave_close,
+@@ -1607,6 +1622,7 @@ static const struct net_device_ops dsa_s
+ .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
+ .ndo_get_devlink_port = dsa_slave_get_devlink_port,
+ .ndo_change_mtu = dsa_slave_change_mtu,
++ .ndo_fill_forward_path = dsa_slave_fill_forward_path,
+ };
+
+ static struct device_type dsa_type = {
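Each .ndo_fill_forward_path implementation resolves one virtual layer and steps the context toward the physical device; the DSA slave variant above records the switch port and tagger protocol and hands off to the CPU master. The following stand-alone C sketch models that walk with simplified stand-in types (none of these are kernel structures).

/* Sketch of a forward-path walk like dev_fill_forward_path(). */
#include <stdio.h>

enum path_type { PATH_ETHERNET, PATH_VLAN, PATH_DSA };

struct path { enum path_type type; int dsa_port; const char *dev; };

struct dev {
    const char *name;
    enum path_type type;
    struct dev *lower;      /* real_dev / CPU master / NULL */
    int dsa_port;
};

static int fill_forward_path(const struct dev *dev, struct path *stack,
                             int max)
{
    int n = 0;

    while (dev && n < max) {
        stack[n].type = dev->type;
        stack[n].dev = dev->name;
        stack[n].dsa_port = dev->dsa_port;
        n++;
        dev = dev->lower;   /* e.g. DSA slave -> CPU master */
    }
    return n;
}

int main(void)
{
    struct dev eth0 = { "eth0", PATH_ETHERNET, NULL, -1 };
    struct dev lan1 = { "lan1", PATH_DSA, &eth0, 1 };
    struct path stack[5];
    int i, n = fill_forward_path(&lan1, stack, 5);

    for (i = 0; i < n; i++)
        printf("%s (type %d, port %d)\n",
               stack[i].dev, stack[i].type, stack[i].dsa_port);
    return 0;
}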
diff --git a/target/linux/generic/pending-5.10/640-11-netfilter-flowtable-add-offload-support-for-xmit-pat.patch b/target/linux/generic/pending-5.10/640-11-netfilter-flowtable-add-offload-support-for-xmit-pat.patch
new file mode 100644
index 0000000000..5e3c7e031a
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-11-netfilter-flowtable-add-offload-support-for-xmit-pat.patch
@@ -0,0 +1,307 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Mon, 7 Dec 2020 20:31:44 +0100
+Subject: [PATCH] netfilter: flowtable: add offload support for xmit path
+ types
+
+When the flow tuple xmit_type is set to FLOW_OFFLOAD_XMIT_DIRECT, the
+dst_cache pointer is not valid, and the h_source/h_dest/ifidx out fields
+need to be used.
+
+This patch also adds the FLOW_ACTION_VLAN_PUSH action to pass the VLAN
+tag to the driver.
+---
+
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -175,28 +175,45 @@ static int flow_offload_eth_src(struct n
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+ {
+- const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
+ struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
+ struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+- struct net_device *dev;
++ const struct flow_offload_tuple *other_tuple, *this_tuple;
++ struct net_device *dev = NULL;
++ const unsigned char *addr;
+ u32 mask, val;
+ u16 val16;
+
+- dev = dev_get_by_index(net, tuple->iifidx);
+- if (!dev)
+- return -ENOENT;
++ this_tuple = &flow->tuplehash[dir].tuple;
++
++ switch (this_tuple->xmit_type) {
++ case FLOW_OFFLOAD_XMIT_DIRECT:
++ addr = this_tuple->out.h_source;
++ break;
++ case FLOW_OFFLOAD_XMIT_NEIGH:
++ other_tuple = &flow->tuplehash[!dir].tuple;
++ dev = dev_get_by_index(net, other_tuple->iifidx);
++ if (!dev)
++ return -ENOENT;
++
++ addr = dev->dev_addr;
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
+
+ mask = ~0xffff0000;
+- memcpy(&val16, dev->dev_addr, 2);
++ memcpy(&val16, addr, 2);
+ val = val16 << 16;
+ flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
+ &val, &mask);
+
+ mask = ~0xffffffff;
+- memcpy(&val, dev->dev_addr + 2, 4);
++ memcpy(&val, addr + 2, 4);
+ flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
+ &val, &mask);
+- dev_put(dev);
++
++ if (dev)
++ dev_put(dev);
+
+ return 0;
+ }
+@@ -208,27 +225,40 @@ static int flow_offload_eth_dst(struct n
+ {
+ struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
+ struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+- const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
++ const struct flow_offload_tuple *other_tuple, *this_tuple;
+ const struct dst_entry *dst_cache;
+ unsigned char ha[ETH_ALEN];
+ struct neighbour *n;
++ const void *daddr;
+ u32 mask, val;
+ u8 nud_state;
+ u16 val16;
+
+- dst_cache = flow->tuplehash[dir].tuple.dst_cache;
+- n = dst_neigh_lookup(dst_cache, daddr);
+- if (!n)
+- return -ENOENT;
++ this_tuple = &flow->tuplehash[dir].tuple;
+
+- read_lock_bh(&n->lock);
+- nud_state = n->nud_state;
+- ether_addr_copy(ha, n->ha);
+- read_unlock_bh(&n->lock);
++ switch (this_tuple->xmit_type) {
++ case FLOW_OFFLOAD_XMIT_DIRECT:
++ ether_addr_copy(ha, this_tuple->out.h_dest);
++ break;
++ case FLOW_OFFLOAD_XMIT_NEIGH:
++ other_tuple = &flow->tuplehash[!dir].tuple;
++ daddr = &other_tuple->src_v4;
++ dst_cache = this_tuple->dst_cache;
++ n = dst_neigh_lookup(dst_cache, daddr);
++ if (!n)
++ return -ENOENT;
+
+- if (!(nud_state & NUD_VALID)) {
++ read_lock_bh(&n->lock);
++ nud_state = n->nud_state;
++ ether_addr_copy(ha, n->ha);
++ read_unlock_bh(&n->lock);
+ neigh_release(n);
+- return -ENOENT;
++
++ if (!(nud_state & NUD_VALID))
++ return -ENOENT;
++ break;
++ default:
++ return -EOPNOTSUPP;
+ }
+
+ mask = ~0xffffffff;
+@@ -241,7 +271,6 @@ static int flow_offload_eth_dst(struct n
+ val = val16;
+ flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
+ &val, &mask);
+- neigh_release(n);
+
+ return 0;
+ }
+@@ -463,27 +492,52 @@ static void flow_offload_ipv4_checksum(s
+ }
+ }
+
+-static void flow_offload_redirect(const struct flow_offload *flow,
++static void flow_offload_redirect(struct net *net,
++ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+ {
+- struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+- struct rtable *rt;
++ const struct flow_offload_tuple *this_tuple, *other_tuple;
++ struct flow_action_entry *entry;
++ struct net_device *dev;
++ int ifindex;
+
+- rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
++ this_tuple = &flow->tuplehash[dir].tuple;
++ switch (this_tuple->xmit_type) {
++ case FLOW_OFFLOAD_XMIT_DIRECT:
++ this_tuple = &flow->tuplehash[dir].tuple;
++ ifindex = this_tuple->out.ifidx;
++ break;
++ case FLOW_OFFLOAD_XMIT_NEIGH:
++ other_tuple = &flow->tuplehash[!dir].tuple;
++ ifindex = other_tuple->iifidx;
++ break;
++ default:
++ return;
++ }
++
++ dev = dev_get_by_index(net, ifindex);
++ if (!dev)
++ return;
++
++ entry = flow_action_entry_next(flow_rule);
+ entry->id = FLOW_ACTION_REDIRECT;
+- entry->dev = rt->dst.dev;
+- dev_hold(rt->dst.dev);
++ entry->dev = dev;
+ }
+
+ static void flow_offload_encap_tunnel(const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+ {
++ const struct flow_offload_tuple *this_tuple;
+ struct flow_action_entry *entry;
+ struct dst_entry *dst;
+
+- dst = flow->tuplehash[dir].tuple.dst_cache;
++ this_tuple = &flow->tuplehash[dir].tuple;
++ if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
++ return;
++
++ dst = this_tuple->dst_cache;
+ if (dst && dst->lwtstate) {
+ struct ip_tunnel_info *tun_info;
+
+@@ -500,10 +554,15 @@ static void flow_offload_decap_tunnel(co
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+ {
++ const struct flow_offload_tuple *other_tuple;
+ struct flow_action_entry *entry;
+ struct dst_entry *dst;
+
+- dst = flow->tuplehash[!dir].tuple.dst_cache;
++ other_tuple = &flow->tuplehash[!dir].tuple;
++ if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
++ return;
++
++ dst = other_tuple->dst_cache;
+ if (dst && dst->lwtstate) {
+ struct ip_tunnel_info *tun_info;
+
+@@ -515,10 +574,14 @@ static void flow_offload_decap_tunnel(co
+ }
+ }
+
+-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+- enum flow_offload_tuple_dir dir,
+- struct nf_flow_rule *flow_rule)
++static int
++nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
++ enum flow_offload_tuple_dir dir,
++ struct nf_flow_rule *flow_rule)
+ {
++ const struct flow_offload_tuple *other_tuple;
++ int i;
++
+ flow_offload_decap_tunnel(flow, dir, flow_rule);
+ flow_offload_encap_tunnel(flow, dir, flow_rule);
+
+@@ -526,6 +589,26 @@ int nf_flow_rule_route_ipv4(struct net *
+ flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
+ return -1;
+
++ other_tuple = &flow->tuplehash[!dir].tuple;
++
++ for (i = 0; i < other_tuple->in_vlan_num; i++) {
++ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
++
++ entry->id = FLOW_ACTION_VLAN_PUSH;
++ entry->vlan.vid = other_tuple->in_vlan[i].id;
++ entry->vlan.proto = other_tuple->in_vlan[i].proto;
++ }
++
++ return 0;
++}
++
++int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
++ enum flow_offload_tuple_dir dir,
++ struct nf_flow_rule *flow_rule)
++{
++ if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
++ return -1;
++
+ if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
+ flow_offload_ipv4_snat(net, flow, dir, flow_rule);
+ flow_offload_port_snat(net, flow, dir, flow_rule);
+@@ -538,7 +621,7 @@ int nf_flow_rule_route_ipv4(struct net *
+ test_bit(NF_FLOW_DNAT, &flow->flags))
+ flow_offload_ipv4_checksum(net, flow, flow_rule);
+
+- flow_offload_redirect(flow, dir, flow_rule);
++ flow_offload_redirect(net, flow, dir, flow_rule);
+
+ return 0;
+ }
+@@ -548,11 +631,7 @@ int nf_flow_rule_route_ipv6(struct net *
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+ {
+- flow_offload_decap_tunnel(flow, dir, flow_rule);
+- flow_offload_encap_tunnel(flow, dir, flow_rule);
+-
+- if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
+- flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
++ if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
+ return -1;
+
+ if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
+@@ -564,7 +643,7 @@ int nf_flow_rule_route_ipv6(struct net *
+ flow_offload_port_dnat(net, flow, dir, flow_rule);
+ }
+
+- flow_offload_redirect(flow, dir, flow_rule);
++ flow_offload_redirect(net, flow, dir, flow_rule);
+
+ return 0;
+ }
+@@ -578,10 +657,10 @@ nf_flow_offload_rule_alloc(struct net *n
+ enum flow_offload_tuple_dir dir)
+ {
+ const struct nf_flowtable *flowtable = offload->flowtable;
++ const struct flow_offload_tuple *tuple, *other_tuple;
+ const struct flow_offload *flow = offload->flow;
+- const struct flow_offload_tuple *tuple;
++ struct dst_entry *other_dst = NULL;
+ struct nf_flow_rule *flow_rule;
+- struct dst_entry *other_dst;
+ int err = -ENOMEM;
+
+ flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
+@@ -597,7 +676,10 @@ nf_flow_offload_rule_alloc(struct net *n
+ flow_rule->rule->match.key = &flow_rule->match.key;
+
+ tuple = &flow->tuplehash[dir].tuple;
+- other_dst = flow->tuplehash[!dir].tuple.dst_cache;
++ other_tuple = &flow->tuplehash[!dir].tuple;
++ if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
++ other_dst = other_tuple->dst_cache;
++
+ err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
+ if (err < 0)
+ goto err_flow_match;
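The reworked flow_offload_redirect() above picks the redirect target differently per xmit type: a direct-xmit flow already knows its output ifindex, while a neighbour flow reuses the reply direction's input interface. Here is a compact stand-alone C model of just that selection; the types are illustrative stand-ins.

/* Sketch of the redirect-device selection in flow_offload_redirect(). */
#include <stdio.h>

enum xmit_type { XMIT_NEIGH, XMIT_DIRECT, XMIT_XFRM };

struct tuple { enum xmit_type xmit_type; int iifidx, out_ifidx; };

static int redirect_ifindex(const struct tuple *this_t,
                            const struct tuple *other_t)
{
    switch (this_t->xmit_type) {
    case XMIT_DIRECT:
        return this_t->out_ifidx;   /* precomputed output port */
    case XMIT_NEIGH:
        return other_t->iifidx;     /* reply direction's input */
    default:
        return -1;                  /* no redirect action */
    }
}

int main(void)
{
    struct tuple orig = { XMIT_DIRECT, 2, 5 };
    struct tuple repl = { XMIT_DIRECT, 5, 2 };

    printf("redirect to ifindex %d\n", redirect_ifindex(&orig, &repl));
    return 0;
}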
diff --git a/target/linux/generic/pending-5.10/640-12-netfilter-nft_flow_offload-add-dsa-support.patch b/target/linux/generic/pending-5.10/640-12-netfilter-nft_flow_offload-add-dsa-support.patch
new file mode 100644
index 0000000000..3689a476b3
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-12-netfilter-nft_flow_offload-add-dsa-support.patch
@@ -0,0 +1,30 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Mon, 18 Jan 2021 22:27:45 +0100
+Subject: [PATCH] netfilter: nft_flow_offload: add dsa support
+
+Replace the master ethernet device with the dsa slave port.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -86,6 +86,7 @@ static void nft_dev_path_info(const stru
+ path = &stack->path[i];
+ switch (path->type) {
+ case DEV_PATH_ETHERNET:
++ case DEV_PATH_DSA:
+ case DEV_PATH_VLAN:
+ info->dev = path->dev;
+ if (is_zero_ether_addr(info->h_source))
+@@ -93,6 +94,10 @@ static void nft_dev_path_info(const stru
+
+ if (path->type == DEV_PATH_ETHERNET)
+ break;
++ if (path->type == DEV_PATH_DSA) {
++ i = stack->num_paths;
++ break;
++ }
+
+ /* DEV_PATH_VLAN */
+ if (info->num_vlans >= NF_FLOW_TABLE_VLAN_MAX) {
diff --git a/target/linux/generic/pending-5.10/640-13-dsa-slave-add-support-for-TC_SETUP_FT.patch b/target/linux/generic/pending-5.10/640-13-dsa-slave-add-support-for-TC_SETUP_FT.patch
new file mode 100644
index 0000000000..58a2064c03
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-13-dsa-slave-add-support-for-TC_SETUP_FT.patch
@@ -0,0 +1,53 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Mon, 18 Jan 2021 22:39:17 +0100
+Subject: [PATCH] dsa: slave: add support for TC_SETUP_FT
+
+The dsa infrastructure provides a well-defined hierarchy of devices;
+pass up the call to set up the flow block to the master device. In the
+software dataplane, the netfilter infrastructure uses the dsa slave
+devices to refer to the input and output device for the given skbuff.
+Similarly, the flowtable definition in the ruleset refers to the dsa
+slave port devices.
+
+This patch adds the glue code to call ndo_setup_tc with TC_SETUP_FT
+on the master device via the dsa slave devices.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1202,14 +1202,32 @@ static int dsa_slave_setup_tc_block(stru
+ }
+ }
+
++static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
++ void *type_data)
++{
++ struct dsa_port *cpu_dp = dsa_to_port(ds, port)->cpu_dp;
++ struct net_device *master = cpu_dp->master;
++
++ if (!master->netdev_ops->ndo_setup_tc)
++ return -EOPNOTSUPP;
++
++ return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
++}
++
+ static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+ {
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+- if (type == TC_SETUP_BLOCK)
++ switch (type) {
++ case TC_SETUP_BLOCK:
+ return dsa_slave_setup_tc_block(dev, type_data);
++ case TC_SETUP_FT:
++ return dsa_slave_setup_ft_block(ds, dp->index, type_data);
++ default:
++ break;
++ }
+
+ if (!ds->ops->port_setup_tc)
+ return -EOPNOTSUPP;
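The pattern above is plain delegation through an ops table: the slave recognises TC_SETUP_FT and forwards the call to its master's ndo_setup_tc. A stand-alone C model of that hand-off follows, with stand-in types (the struct and its fields are illustrative, not the kernel's net_device).

/* Sketch of the TC_SETUP_FT pass-through from slave to CPU master. */
#include <stdio.h>
#include <errno.h>

enum tc_setup { TC_SETUP_BLOCK, TC_SETUP_FT };

struct netdev {
    const char *name;
    int (*setup_tc)(struct netdev *dev, enum tc_setup type, void *data);
    struct netdev *master;      /* CPU port master for DSA slaves */
};

static int master_setup_tc(struct netdev *dev, enum tc_setup type, void *data)
{
    (void)data;
    printf("%s: handling setup type %d\n", dev->name, type);
    return 0;
}

static int slave_setup_tc(struct netdev *dev, enum tc_setup type, void *data)
{
    if (type != TC_SETUP_FT)
        return -EOPNOTSUPP;
    if (!dev->master || !dev->master->setup_tc)
        return -EOPNOTSUPP;
    /* delegate the flow-block setup to the master device */
    return dev->master->setup_tc(dev->master, type, data);
}

int main(void)
{
    struct netdev eth0 = { "eth0", master_setup_tc, NULL };
    struct netdev lan1 = { "lan1", slave_setup_tc, &eth0 };

    return slave_setup_tc(&lan1, TC_SETUP_FT, NULL);
}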
diff --git a/target/linux/generic/pending-5.10/640-14-netfilter-nft_flow_offload-add-bridge-vlan-filtering.patch b/target/linux/generic/pending-5.10/640-14-netfilter-nft_flow_offload-add-bridge-vlan-filtering.patch
new file mode 100644
index 0000000000..2e833117db
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-14-netfilter-nft_flow_offload-add-bridge-vlan-filtering.patch
@@ -0,0 +1,31 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Sun, 24 Jan 2021 18:01:34 +0100
+Subject: [PATCH] netfilter: nft_flow_offload: add bridge vlan filtering
+ support
+
+Add the vlan tag based on the bridge port PVID when one is set.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -112,6 +112,18 @@ static void nft_dev_path_info(const stru
+ if (is_zero_ether_addr(info->h_source))
+ memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
+
++ switch (path->bridge.vlan_mode) {
++ case DEV_PATH_BR_VLAN_TAG:
++ info->vid[info->num_vlans] = path->vlan.id;
++ info->vproto[info->num_vlans] = path->vlan.proto;
++ info->num_vlans++;
++ break;
++ case DEV_PATH_BR_VLAN_UNTAG:
++ info->num_vlans--;
++ break;
++ case DEV_PATH_BR_VLAN_KEEP:
++ break;
++ }
+ info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
+ break;
+ default:
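In effect, nft_dev_path_info() now treats the collected VLAN list as a stack: DEV_PATH_BR_VLAN_TAG pushes an entry, DEV_PATH_BR_VLAN_UNTAG pops one, DEV_PATH_BR_VLAN_KEEP leaves it alone. A stand-alone C model follows; the bounds checks are added for safety in the sketch and the names are illustrative.

/* Model of the VLAN push/pop driven by the bridge path's vlan_mode. */
#include <stdio.h>

#define VLAN_MAX 2

enum vlan_mode { BR_VLAN_KEEP, BR_VLAN_TAG, BR_VLAN_UNTAG };

struct info { unsigned short vid[VLAN_MAX]; int num_vlans; };

static void apply_bridge_mode(struct info *info, enum vlan_mode mode,
                              unsigned short vid)
{
    switch (mode) {
    case BR_VLAN_TAG:
        if (info->num_vlans < VLAN_MAX)     /* guard added in sketch */
            info->vid[info->num_vlans++] = vid;
        break;
    case BR_VLAN_UNTAG:
        if (info->num_vlans > 0)            /* guard added in sketch */
            info->num_vlans--;
        break;
    case BR_VLAN_KEEP:
        break;
    }
}

int main(void)
{
    struct info info = { { 0 }, 0 };

    apply_bridge_mode(&info, BR_VLAN_TAG, 10);
    apply_bridge_mode(&info, BR_VLAN_UNTAG, 0);
    printf("num_vlans = %d\n", info.num_vlans);
    return 0;
}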
diff --git a/target/linux/generic/pending-5.10/640-15-netfilter-nft_flow_offload-use-direct-xmit-if-hardwa.patch b/target/linux/generic/pending-5.10/640-15-netfilter-nft_flow_offload-use-direct-xmit-if-hardwa.patch
new file mode 100644
index 0000000000..1c65a5e218
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-15-netfilter-nft_flow_offload-use-direct-xmit-if-hardwa.patch
@@ -0,0 +1,51 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Tue, 2 Feb 2021 17:10:07 +0100
+Subject: [PATCH] netfilter: nft_flow_offload: use direct xmit if
+ hardware offload is enabled
+
+If there is a forward path to reach an ethernet device and hardware
+offload is enabled, then use the direct xmit path.
+---
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -73,9 +73,18 @@ struct nft_forward_info {
+ enum flow_offload_xmit_type xmit_type;
+ };
+
++static bool nft_is_valid_ether_device(const struct net_device *dev)
++{
++ if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
++ dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
++ return false;
++
++ return true;
++}
++
+ static void nft_dev_path_info(const struct net_device_path_stack *stack,
+ struct nft_forward_info *info,
+- unsigned char *ha)
++ unsigned char *ha, struct nf_flowtable *flowtable)
+ {
+ const struct net_device_path *path;
+ int i;
+@@ -131,6 +140,10 @@ static void nft_dev_path_info(const stru
+ break;
+ }
+ }
++
++ if (nf_flowtable_hw_offload(flowtable) &&
++ nft_is_valid_ether_device(info->dev))
++ info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
+ }
+
+ static bool nft_flowtable_find_dev(const struct net_device *dev,
+@@ -162,7 +175,7 @@ static void nft_dev_forward_path(struct
+ int i;
+
+ if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
+- nft_dev_path_info(&stack, &info, ha);
++ nft_dev_path_info(&stack, &info, ha, &ft->data);
+
+ if (!info.dev || !nft_flowtable_find_dev(info.dev, ft))
+ return;
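The nft_is_valid_ether_device() predicate above gates the direct-xmit upgrade: the device must be a non-loopback Ethernet device with a valid unicast MAC. Below is a user-space model of the same check; the struct is a simplified stand-in for net_device, and the address test mirrors is_valid_ether_addr() (unicast and non-zero).

/* Model of the "valid ethernet device" check. */
#include <stdio.h>
#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6
#define IFF_LOOPBACK 0x8
#define ARPHRD_ETHER 1

struct dev_model {
    unsigned int flags;
    unsigned short type;
    unsigned char addr_len;
    unsigned char dev_addr[ETH_ALEN];
};

static bool is_valid_ether_device(const struct dev_model *dev)
{
    static const unsigned char zero[ETH_ALEN];

    if (!dev || (dev->flags & IFF_LOOPBACK) ||
        dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN)
        return false;
    /* valid = unicast (low bit of first octet clear) and non-zero */
    if (dev->dev_addr[0] & 1)
        return false;
    return memcmp(dev->dev_addr, zero, ETH_ALEN) != 0;
}

int main(void)
{
    struct dev_model d = { 0, ARPHRD_ETHER, ETH_ALEN,
                           { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };

    printf("valid: %d\n", is_valid_ether_device(&d));
    return 0;
}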
diff --git a/target/linux/generic/pending-5.10/640-16-netfilter-nft_flow_offload-fix-bridge-vlan-tag-handl.patch b/target/linux/generic/pending-5.10/640-16-netfilter-nft_flow_offload-fix-bridge-vlan-tag-handl.patch
new file mode 100644
index 0000000000..5877e91e78
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-16-netfilter-nft_flow_offload-fix-bridge-vlan-tag-handl.patch
@@ -0,0 +1,22 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 10 Feb 2021 10:23:38 +0100
+Subject: [PATCH] netfilter: nft_flow_offload: fix bridge vlan tag handling
+
+The bridge path type uses the path->bridge.vlan_* fields.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -123,8 +123,8 @@ static void nft_dev_path_info(const stru
+
+ switch (path->bridge.vlan_mode) {
+ case DEV_PATH_BR_VLAN_TAG:
+- info->vid[info->num_vlans] = path->vlan.id;
+- info->vproto[info->num_vlans] = path->vlan.proto;
++ info->vid[info->num_vlans] = path->bridge.vlan_id;
++ info->vproto[info->num_vlans] = path->bridge.vlan_proto;
+ info->num_vlans++;
+ break;
+ case DEV_PATH_BR_VLAN_UNTAG:
diff --git a/target/linux/generic/pending-5.10/640-17-netfilter-flowtable-rework-ingress-vlan-matching.patch b/target/linux/generic/pending-5.10/640-17-netfilter-flowtable-rework-ingress-vlan-matching.patch
new file mode 100644
index 0000000000..67ec263f51
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-17-netfilter-flowtable-rework-ingress-vlan-matching.patch
@@ -0,0 +1,143 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 10 Feb 2021 19:39:23 +0100
+Subject: [PATCH] netfilter: flowtable: rework ingress vlan matching
+
+When dealing with bridges with VLAN filtering and DSA/switchdev offload,
+the hardware may take care of adding a VLAN tag configured in the bridge.
+Since there doesn't seem to be an easy way to detect that, this patch
+reworks the code to optionally match the last VLAN tag that would otherwise
+be inserted by the bridge.
+This matters when bypassing the bridge and attaching an ingress hook on
+a DSA port below it.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -115,14 +115,15 @@ struct flow_offload_tuple {
+
+ u8 l3proto;
+ u8 l4proto;
+- struct {
+- u16 id;
+- __be16 proto;
+- } in_vlan[NF_FLOW_TABLE_VLAN_MAX];
+
+ /* All members above are keys for lookups, see flow_offload_hash(). */
+ struct { } __hash;
+
++ struct {
++ u16 id;
++ __be16 proto;
++ } in_vlan[NF_FLOW_TABLE_VLAN_MAX], in_pvid;
++
+ u8 dir:4,
+ xmit_type:2,
+ in_vlan_num:2;
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -281,12 +281,13 @@ static bool nf_flow_skb_vlan_protocol(co
+ }
+
+ static void nf_flow_vlan_pop(struct sk_buff *skb,
+- struct flow_offload_tuple_rhash *tuplehash)
++ struct flow_offload_tuple_rhash *tuplehash,
++ bool strip_pvid)
+ {
+ struct vlan_hdr *vlan_hdr;
+ int i;
+
+- for (i = 0; i < tuplehash->tuple.in_vlan_num; i++) {
++ for (i = 0; i < tuplehash->tuple.in_vlan_num + strip_pvid; i++) {
+ if (skb_vlan_tag_present(skb)) {
+ __vlan_hwaccel_clear_tag(skb);
+ continue;
+@@ -316,6 +317,31 @@ static unsigned int nf_flow_queue_xmit(s
+ return NF_STOLEN;
+ }
+
++static bool
++nf_flow_offload_check_vlan(struct flow_offload_tuple *tuple,
++ struct flow_offload_tuple *flow_tuple,
++ bool *strip_pvid)
++{
++ int i, cur = 0;
++
++ if (flow_tuple->in_pvid.proto &&
++ !memcmp(&tuple->in_vlan[0], &flow_tuple->in_pvid,
++ sizeof(tuple->in_vlan[0])))
++ cur++;
++
++ *strip_pvid = cur;
++
++ for (i = 0; i < flow_tuple->in_vlan_num; i++, cur++) {
++ if (!memcmp(&tuple->in_vlan[cur], &flow_tuple->in_vlan[i],
++ sizeof(tuple->in_vlan[0])))
++ continue;
++
++ return false;
++ }
++
++ return true;
++}
++
+ unsigned int
+ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+@@ -329,6 +355,7 @@ nf_flow_offload_ip_hook(void *priv, stru
+ struct rtable *rt;
+ unsigned int thoff;
+ struct iphdr *iph;
++ bool strip_pvid;
+ __be32 nexthop;
+ u32 offset = 0;
+ int ret;
+@@ -344,6 +371,10 @@ nf_flow_offload_ip_hook(void *priv, stru
+ if (tuplehash == NULL)
+ return NF_ACCEPT;
+
++ if (!nf_flow_offload_check_vlan(&tuple, &tuplehash->tuple,
++ &strip_pvid))
++ return NF_ACCEPT;
++
+ dir = tuplehash->tuple.dir;
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+
+@@ -368,7 +399,7 @@ nf_flow_offload_ip_hook(void *priv, stru
+ return NF_ACCEPT;
+ }
+
+- nf_flow_vlan_pop(skb, tuplehash);
++ nf_flow_vlan_pop(skb, tuplehash, strip_pvid);
+ thoff -= offset;
+
+ if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
+@@ -596,6 +627,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
+ struct net_device *outdev;
+ struct ipv6hdr *ip6h;
+ struct rt6_info *rt;
++ bool strip_pvid;
+ u32 offset = 0;
+ int ret;
+
+@@ -610,6 +642,10 @@ nf_flow_offload_ipv6_hook(void *priv, st
+ if (tuplehash == NULL)
+ return NF_ACCEPT;
+
++ if (!nf_flow_offload_check_vlan(&tuple, &tuplehash->tuple,
++ &strip_pvid))
++ return NF_ACCEPT;
++
+ dir = tuplehash->tuple.dir;
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+
+@@ -630,7 +666,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
+ return NF_ACCEPT;
+ }
+
+- nf_flow_vlan_pop(skb, tuplehash);
++ nf_flow_vlan_pop(skb, tuplehash, strip_pvid);
+
+ if (skb_try_make_writable(skb, sizeof(*ip6h)))
+ return NF_DROP;
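The core of this rework is nf_flow_offload_check_vlan(): the first on-wire tag may optionally match the stored pvid, which shifts the position at which the remaining tags are compared and tells the caller whether to strip that extra tag. A stand-alone C model follows; the types are illustrative stand-ins and the struct has no padding, so memcmp-based comparison is safe here as in the kernel code.

/* Model of the optional-pvid VLAN match. */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define VLAN_MAX 2

struct vlan { unsigned short id, proto; };

struct key {
    struct vlan in_vlan[VLAN_MAX];  /* tags stored in the flow tuple */
    struct vlan pvid;               /* optional hw-handled tag */
    int num_vlans;
};

static bool check_vlan(const struct vlan *pkt, const struct key *flow,
                       bool *strip_pvid)
{
    int i, cur = 0;

    /* the pvid tag may or may not be present on the wire */
    if (flow->pvid.proto &&
        !memcmp(&pkt[0], &flow->pvid, sizeof(pkt[0])))
        cur++;
    *strip_pvid = cur;

    for (i = 0; i < flow->num_vlans; i++, cur++) {
        if (memcmp(&pkt[cur], &flow->in_vlan[i], sizeof(pkt[0])))
            return false;
    }
    return true;
}

int main(void)
{
    struct key flow = { { { 20, 0x8100 } }, { 10, 0x8100 }, 1 };
    struct vlan pkt[2] = { { 10, 0x8100 }, { 20, 0x8100 } };
    bool strip;

    printf("match: %d strip_pvid: %d\n",
           check_vlan(pkt, &flow, &strip), (int)strip);
    return 0;
}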
diff --git a/target/linux/generic/pending-5.10/640-18-netfilter-flowtable-handle-bridge-vlan-filter-offloa.patch b/target/linux/generic/pending-5.10/640-18-netfilter-flowtable-handle-bridge-vlan-filter-offloa.patch
new file mode 100644
index 0000000000..ba480f3520
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-18-netfilter-flowtable-handle-bridge-vlan-filter-offloa.patch
@@ -0,0 +1,106 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 10 Feb 2021 19:44:33 +0100
+Subject: [PATCH] netfilter: flowtable: handle bridge vlan filter offload
+ tags from DSA/switchdev
+
+When a switchdev/DSA port is an untagged member of a bridge vlan, ingress
+packets could potentially be tagged with the id of the VLAN.
+When the VLAN port group has been uploaded to switchdev, report the bridge
+tag mode as DEV_PATH_BR_VLAN_UNTAG_HW instead of DEV_PATH_BR_VLAN_UNTAG
+and handle it in netfilter flow offloading by storing the tag in the tuple
+in_pvid field.
+This allows the ingress hook to detect the optional tag and remove it for
+software offload. This tag processing is for ingress only; egress needs to be
+fully untagged.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -853,6 +853,7 @@ struct net_device_path {
+ DEV_PATH_BR_VLAN_KEEP,
+ DEV_PATH_BR_VLAN_TAG,
+ DEV_PATH_BR_VLAN_UNTAG,
++ DEV_PATH_BR_VLAN_UNTAG_HW,
+ } vlan_mode;
+ u16 vlan_id;
+ __be16 vlan_proto;
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -183,6 +183,10 @@ struct nf_flow_route {
+ u32 ifindex;
+ u16 vid[NF_FLOW_TABLE_VLAN_MAX];
+ __be16 vproto[NF_FLOW_TABLE_VLAN_MAX];
++ struct {
++ u16 id;
++ __be16 proto;
++ } pvid;
+ u8 num_vlans;
+ } in;
+ struct {
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -435,6 +435,7 @@ static int br_fill_forward_path(struct n
+ ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto;
+ ctx->num_vlans++;
+ break;
++ case DEV_PATH_BR_VLAN_UNTAG_HW:
+ case DEV_PATH_BR_VLAN_UNTAG:
+ ctx->num_vlans--;
+ break;
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -1374,6 +1374,8 @@ int br_vlan_fill_forward_path_mode(struc
+
+ if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
+ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
++ else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
++ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
+ else
+ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;
+
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -98,6 +98,8 @@ static int flow_offload_fill_route(struc
+ j++;
+ }
+ flow_tuple->in_vlan_num = route->tuple[dir].in.num_vlans;
++ flow_tuple->in_pvid.id = route->tuple[dir].in.pvid.id;
++ flow_tuple->in_pvid.proto = route->tuple[dir].in.pvid.proto;
+
+ switch (route->tuple[dir].xmit_type) {
+ case FLOW_OFFLOAD_XMIT_DIRECT:
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -67,6 +67,10 @@ struct nft_forward_info {
+ const struct net_device *dev;
+ __u16 vid[NF_FLOW_TABLE_VLAN_MAX];
+ __be16 vproto[NF_FLOW_TABLE_VLAN_MAX];
++ struct {
++ __u16 id;
++ __be16 proto;
++ } pvid;
+ u8 num_vlans;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
+@@ -127,6 +131,10 @@ static void nft_dev_path_info(const stru
+ info->vproto[info->num_vlans] = path->bridge.vlan_proto;
+ info->num_vlans++;
+ break;
++ case DEV_PATH_BR_VLAN_UNTAG_HW:
++ info->pvid.id = info->vid[info->num_vlans - 1];
++ info->pvid.proto = info->vproto[info->num_vlans - 1];
++ fallthrough;
+ case DEV_PATH_BR_VLAN_UNTAG:
+ info->num_vlans--;
+ break;
+@@ -185,6 +193,8 @@ static void nft_dev_forward_path(struct
+ route->tuple[!dir].in.vid[i] = info.vid[i];
+ route->tuple[!dir].in.vproto[i] = info.vproto[i];
+ }
++ route->tuple[!dir].in.pvid.id = info.pvid.id;
++ route->tuple[!dir].in.pvid.proto = info.pvid.proto;
+ route->tuple[!dir].in.num_vlans = info.num_vlans;
+
+ if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
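The DEV_PATH_BR_VLAN_UNTAG_HW case above differs from plain UNTAG in one step: before popping, the tag is remembered as the optional pvid, since hardware may or may not have stripped it by the time the ingress hook runs. A minimal stand-alone C sketch of that bookkeeping follows, with illustrative stand-in types.

/* Model of the UNTAG_HW handling in nft_dev_path_info(). */
#include <stdio.h>

#define VLAN_MAX 2

struct vlan { unsigned short id, proto; };

enum vlan_mode { BR_VLAN_KEEP, BR_VLAN_TAG, BR_VLAN_UNTAG, BR_VLAN_UNTAG_HW };

struct info {
    struct vlan vlans[VLAN_MAX];
    struct vlan pvid;
    int num_vlans;
};

static void apply_mode(struct info *info, enum vlan_mode mode)
{
    switch (mode) {
    case BR_VLAN_UNTAG_HW:
        /* tag may or may not still be present on ingress; keep it as
         * the optional pvid before popping (num_vlans >= 1 here) */
        info->pvid = info->vlans[info->num_vlans - 1];
        /* fallthrough */
    case BR_VLAN_UNTAG:
        info->num_vlans--;
        break;
    default:
        break;
    }
}

int main(void)
{
    struct info info = { { { 10, 0x8100 } }, { 0, 0 }, 1 };

    apply_mode(&info, BR_VLAN_UNTAG_HW);
    printf("pvid id %u, remaining vlans %d\n",
           info.pvid.id, info.num_vlans);
    return 0;
}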
diff --git a/target/linux/generic/pending-5.10/655-increase_skb_pad.patch b/target/linux/generic/pending-5.10/655-increase_skb_pad.patch
new file mode 100644
index 0000000000..ff39ddb8b3
--- /dev/null
+++ b/target/linux/generic/pending-5.10/655-increase_skb_pad.patch
@@ -0,0 +1,20 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: kernel: add a few patches for avoiding unnecessary skb reallocations - significantly improves ethernet<->wireless performance
+
+lede-commit: 6f89cffc9add6939d44a6b54cf9a5e77849aa7fd
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/linux/skbuff.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2676,7 +2676,7 @@ static inline int pskb_network_may_pull(
+ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
+ */
+ #ifndef NET_SKB_PAD
+-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(64, L1_CACHE_BYTES)
+ #endif
+
+ int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/target/linux/generic/pending-5.10/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch b/target/linux/generic/pending-5.10/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch
new file mode 100644
index 0000000000..924baa0607
--- /dev/null
+++ b/target/linux/generic/pending-5.10/666-Add-support-for-MAP-E-FMRs-mesh-mode.patch
@@ -0,0 +1,511 @@
+From: Steven Barth <steven@midlink.org>
+Subject: Add support for MAP-E FMRs (mesh mode)
+
+MAP-E FMRs (draft-ietf-softwire-map-10) are rules for IPv4 communication
+between MAP CEs (mesh mode) without the need to forward such data to a
+border relay. This is similar to how 6rd works but for IPv4 over IPv6.
+
+Signed-off-by: Steven Barth <cyrus@openwrt.org>
+---
+ include/net/ip6_tunnel.h | 13 ++
+ include/uapi/linux/if_tunnel.h | 13 ++
+ net/ipv6/ip6_tunnel.c | 276 +++++++++++++++++++++++++++++++++++++++--
+ 3 files changed, 291 insertions(+), 11 deletions(-)
+
+--- a/include/net/ip6_tunnel.h
++++ b/include/net/ip6_tunnel.h
+@@ -18,6 +18,18 @@
+ /* determine capability on a per-packet basis */
+ #define IP6_TNL_F_CAP_PER_PACKET 0x40000
+
++/* IPv6 tunnel FMR */
++struct __ip6_tnl_fmr {
++ struct __ip6_tnl_fmr *next; /* next fmr in list */
++ struct in6_addr ip6_prefix;
++ struct in_addr ip4_prefix;
++
++ __u8 ip6_prefix_len;
++ __u8 ip4_prefix_len;
++ __u8 ea_len;
++ __u8 offset;
++};
++
+ struct __ip6_tnl_parm {
+ char name[IFNAMSIZ]; /* name of tunnel device */
+ int link; /* ifindex of underlying L2 interface */
+@@ -29,6 +41,7 @@ struct __ip6_tnl_parm {
+ __u32 flags; /* tunnel flags */
+ struct in6_addr laddr; /* local tunnel end-point address */
+ struct in6_addr raddr; /* remote tunnel end-point address */
++ struct __ip6_tnl_fmr *fmrs; /* FMRs */
+
+ __be16 i_flags;
+ __be16 o_flags;
+--- a/include/uapi/linux/if_tunnel.h
++++ b/include/uapi/linux/if_tunnel.h
+@@ -77,10 +77,23 @@ enum {
+ IFLA_IPTUN_ENCAP_DPORT,
+ IFLA_IPTUN_COLLECT_METADATA,
+ IFLA_IPTUN_FWMARK,
++ IFLA_IPTUN_FMRS,
+ __IFLA_IPTUN_MAX,
+ };
+ #define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
+
++enum {
++ IFLA_IPTUN_FMR_UNSPEC,
++ IFLA_IPTUN_FMR_IP6_PREFIX,
++ IFLA_IPTUN_FMR_IP4_PREFIX,
++ IFLA_IPTUN_FMR_IP6_PREFIX_LEN,
++ IFLA_IPTUN_FMR_IP4_PREFIX_LEN,
++ IFLA_IPTUN_FMR_EA_LEN,
++ IFLA_IPTUN_FMR_OFFSET,
++ __IFLA_IPTUN_FMR_MAX,
++};
++#define IFLA_IPTUN_FMR_MAX (__IFLA_IPTUN_FMR_MAX - 1)
++
+ enum tunnel_encap_types {
+ TUNNEL_ENCAP_NONE,
+ TUNNEL_ENCAP_FOU,
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -11,6 +11,9 @@
+ * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
+ *
+ * RFC 2473
++ *
++ * Changes:
++ * Steven Barth <cyrus@openwrt.org>: MAP-E FMR support
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -67,9 +70,9 @@ static bool log_ecn_error = true;
+ module_param(log_ecn_error, bool, 0644);
+ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+-static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
++static u32 HASH(const struct in6_addr *addr)
+ {
+- u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
++ u32 hash = ipv6_addr_hash(addr);
+
+ return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
+ }
+@@ -144,17 +147,33 @@ static struct ip6_tnl *
+ ip6_tnl_lookup(struct net *net, int link,
+ const struct in6_addr *remote, const struct in6_addr *local)
+ {
+- unsigned int hash = HASH(remote, local);
++ unsigned int hash = HASH(local);
+ struct ip6_tnl *t, *cand = NULL;
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+ struct in6_addr any;
+
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+- !ipv6_addr_equal(remote, &t->parms.raddr) ||
+ !(t->dev->flags & IFF_UP))
+ continue;
+
++ if (!ipv6_addr_equal(remote, &t->parms.raddr)) {
++ struct __ip6_tnl_fmr *fmr;
++ bool found = false;
++
++ for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
++ if (!ipv6_prefix_equal(remote, &fmr->ip6_prefix,
++ fmr->ip6_prefix_len))
++ continue;
++
++ found = true;
++ break;
++ }
++
++ if (!found)
++ continue;
++ }
++
+ if (link == t->parms.link)
+ return t;
+ else
+@@ -162,7 +181,7 @@ ip6_tnl_lookup(struct net *net, int link
+ }
+
+ memset(&any, 0, sizeof(any));
+- hash = HASH(&any, local);
++ hash = HASH(local);
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+ !ipv6_addr_any(&t->parms.raddr) ||
+@@ -175,7 +194,7 @@ ip6_tnl_lookup(struct net *net, int link
+ cand = t;
+ }
+
+- hash = HASH(remote, &any);
++ hash = HASH(&any);
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
+ !ipv6_addr_any(&t->parms.laddr) ||
+@@ -223,7 +242,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n,
+
+ if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
+ prio = 1;
+- h = HASH(remote, local);
++ h = HASH(local);
+ }
+ return &ip6n->tnls[prio][h];
+ }
+@@ -406,6 +425,12 @@ ip6_tnl_dev_uninit(struct net_device *de
+ struct net *net = t->net;
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+
++ while (t->parms.fmrs) {
++ struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
++ kfree(t->parms.fmrs);
++ t->parms.fmrs = next;
++ }
++
+ if (dev == ip6n->fb_tnl_dev)
+ RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
+ else
+@@ -822,6 +847,107 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
+ }
+ EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
+
++/**
++ * ip4ip6_fmr_calc - calculate target / source IPv6-address based on FMR
++ * @dest: destination IPv6 address buffer
++ * @iph: IPv4 header of the packet (packet data ends at @end)
++ * @fmr: MAP FMR
++ * @xmit: Calculate for xmit or rcv
++ **/
++static void ip4ip6_fmr_calc(struct in6_addr *dest,
++ const struct iphdr *iph, const uint8_t *end,
++ const struct __ip6_tnl_fmr *fmr, bool xmit)
++{
++ int psidlen = fmr->ea_len - (32 - fmr->ip4_prefix_len);
++ u8 *portp = NULL;
++ bool use_dest_addr;
++ const struct iphdr *dsth = iph;
++
++ if ((u8*)dsth >= end)
++ return;
++
++ /* find significant IP header */
++ if (iph->protocol == IPPROTO_ICMP) {
++ struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
++ if (ih && ((u8*)&ih[1]) <= end && (
++ ih->type == ICMP_DEST_UNREACH ||
++ ih->type == ICMP_SOURCE_QUENCH ||
++ ih->type == ICMP_TIME_EXCEEDED ||
++ ih->type == ICMP_PARAMETERPROB ||
++ ih->type == ICMP_REDIRECT))
++ dsth = (const struct iphdr*)&ih[1];
++ }
++
++ /* in xmit-path use dest port by default and source port only if
++ this is an ICMP reply to something else; vice versa in rcv-path */
++ use_dest_addr = (xmit && dsth == iph) || (!xmit && dsth != iph);
++
++ /* get dst port */
++ if (((u8*)&dsth[1]) <= end && (
++ dsth->protocol == IPPROTO_UDP ||
++ dsth->protocol == IPPROTO_TCP ||
++ dsth->protocol == IPPROTO_SCTP ||
++ dsth->protocol == IPPROTO_DCCP)) {
++ /* for UDP, TCP, SCTP and DCCP source and dest port
++ follow IPv4 header directly */
++ portp = ((u8*)dsth) + dsth->ihl * 4;
++
++ if (use_dest_addr)
++ portp += sizeof(u16);
++ } else if (iph->protocol == IPPROTO_ICMP) {
++ struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
++
++ /* use icmp identifier as port */
++ if (((u8*)&ih[1]) <= end && (
++ (use_dest_addr && (
++ ih->type == ICMP_ECHOREPLY ||
++ ih->type == ICMP_TIMESTAMPREPLY ||
++ ih->type == ICMP_INFO_REPLY ||
++ ih->type == ICMP_ADDRESSREPLY)) ||
++ (!use_dest_addr && (
++ ih->type == ICMP_ECHO ||
++ ih->type == ICMP_TIMESTAMP ||
++ ih->type == ICMP_INFO_REQUEST ||
++ ih->type == ICMP_ADDRESS)
++ )))
++ portp = (u8*)&ih->un.echo.id;
++ }
++
++ if ((portp && &portp[2] <= end) || psidlen == 0) {
++ int frombyte = fmr->ip6_prefix_len / 8;
++ int fromrem = fmr->ip6_prefix_len % 8;
++ int bytes = sizeof(struct in6_addr) - frombyte;
++ const u32 *addr = (use_dest_addr) ? &iph->daddr : &iph->saddr;
++ u64 eabits = ((u64)ntohl(*addr)) << (32 + fmr->ip4_prefix_len);
++ u64 t = 0;
++
++ /* extract PSID from port and add it to eabits */
++ u16 psidbits = 0;
++ if (psidlen > 0) {
++ psidbits = ((u16)portp[0]) << 8 | ((u16)portp[1]);
++ psidbits >>= 16 - psidlen - fmr->offset;
++ psidbits = (u16)(psidbits << (16 - psidlen));
++ eabits |= ((u64)psidbits) << (48 - (fmr->ea_len - psidlen));
++ }
++
++ /* rewrite destination address */
++ *dest = fmr->ip6_prefix;
++ memcpy(&dest->s6_addr[10], addr, sizeof(*addr));
++ dest->s6_addr16[7] = htons(psidbits >> (16 - psidlen));
++
++ if (bytes > sizeof(u64))
++ bytes = sizeof(u64);
++
++ /* insert eabits */
++ memcpy(&t, &dest->s6_addr[frombyte], bytes);
++ t = be64_to_cpu(t) & ~(((((u64)1) << fmr->ea_len) - 1)
++ << (64 - fmr->ea_len - fromrem));
++ t = cpu_to_be64(t | (eabits >> fromrem));
++ memcpy(&dest->s6_addr[frombyte], &t, bytes);
++ }
++}
++
++
+ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ const struct tnl_ptk_info *tpi,
+ struct metadata_dst *tun_dst,
+@@ -874,6 +1000,27 @@ static int __ip6_tnl_rcv(struct ip6_tnl
+ skb_reset_network_header(skb);
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+
++ if (tpi->proto == htons(ETH_P_IP) && tunnel->parms.fmrs &&
++ !ipv6_addr_equal(&ipv6h->saddr, &tunnel->parms.raddr)) {
++ /* Packet didn't come from BR, so look up the FMR */
++ struct __ip6_tnl_fmr *fmr;
++ struct in6_addr expected = tunnel->parms.raddr;
++ for (fmr = tunnel->parms.fmrs; fmr; fmr = fmr->next)
++ if (ipv6_prefix_equal(&ipv6h->saddr,
++ &fmr->ip6_prefix, fmr->ip6_prefix_len))
++ break;
++
++ /* Check that IPv6 matches IPv4 source to prevent spoofing */
++ if (fmr)
++ ip4ip6_fmr_calc(&expected, ip_hdr(skb),
++ skb_tail_pointer(skb), fmr, false);
++
++ if (!ipv6_addr_equal(&ipv6h->saddr, &expected)) {
++ rcu_read_unlock();
++ goto drop;
++ }
++ }
++
+ __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
+
+ err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
+@@ -1025,6 +1172,7 @@ static void init_tel_txopt(struct ipv6_t
+ opt->ops.opt_nflen = 8;
+ }
+
++
+ /**
+ * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
+ * @t: the outgoing tunnel device
+@@ -1307,6 +1455,7 @@ ipxip6_tnl_xmit(struct sk_buff *skb, str
+ u8 protocol)
+ {
+ struct ip6_tnl *t = netdev_priv(dev);
++ struct __ip6_tnl_fmr *fmr;
+ struct ipv6hdr *ipv6h;
+ const struct iphdr *iph;
+ int encap_limit = -1;
+@@ -1406,6 +1555,18 @@ ipxip6_tnl_xmit(struct sk_buff *skb, str
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+ dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield);
+
++ /* try to find matching FMR */
++ for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
++ unsigned mshift = 32 - fmr->ip4_prefix_len;
++ if (ntohl(fmr->ip4_prefix.s_addr) >> mshift ==
++ ntohl(ip_hdr(skb)->daddr) >> mshift)
++ break;
++ }
++
++ /* change dstaddr according to FMR */
++ if (fmr)
++ ip4ip6_fmr_calc(&fl6.daddr, ip_hdr(skb), skb_tail_pointer(skb), fmr, true);
++
+ if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
+ return -1;
+
+@@ -1556,6 +1717,14 @@ ip6_tnl_change(struct ip6_tnl *t, const
+ t->parms.link = p->link;
+ t->parms.proto = p->proto;
+ t->parms.fwmark = p->fwmark;
++
++ while (t->parms.fmrs) {
++ struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
++ kfree(t->parms.fmrs);
++ t->parms.fmrs = next;
++ }
++ t->parms.fmrs = p->fmrs;
++
+ dst_cache_reset(&t->dst_cache);
+ ip6_tnl_link_config(t);
+ return 0;
+@@ -1594,6 +1763,7 @@ ip6_tnl_parm_from_user(struct __ip6_tnl_
+ p->flowinfo = u->flowinfo;
+ p->link = u->link;
+ p->proto = u->proto;
++ p->fmrs = NULL;
+ memcpy(p->name, u->name, sizeof(u->name));
+ }
+
+@@ -1979,6 +2149,15 @@ static int ip6_tnl_validate(struct nlatt
+ return 0;
+ }
+
++static const struct nla_policy ip6_tnl_fmr_policy[IFLA_IPTUN_FMR_MAX + 1] = {
++ [IFLA_IPTUN_FMR_IP6_PREFIX] = { .len = sizeof(struct in6_addr) },
++ [IFLA_IPTUN_FMR_IP4_PREFIX] = { .len = sizeof(struct in_addr) },
++ [IFLA_IPTUN_FMR_IP6_PREFIX_LEN] = { .type = NLA_U8 },
++ [IFLA_IPTUN_FMR_IP4_PREFIX_LEN] = { .type = NLA_U8 },
++ [IFLA_IPTUN_FMR_EA_LEN] = { .type = NLA_U8 },
++ [IFLA_IPTUN_FMR_OFFSET] = { .type = NLA_U8 }
++};
++
+ static void ip6_tnl_netlink_parms(struct nlattr *data[],
+ struct __ip6_tnl_parm *parms)
+ {
+@@ -2016,6 +2195,46 @@ static void ip6_tnl_netlink_parms(struct
+
+ if (data[IFLA_IPTUN_FWMARK])
+ parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
++
++ if (data[IFLA_IPTUN_FMRS]) {
++ unsigned rem;
++ struct nlattr *fmr;
++ nla_for_each_nested(fmr, data[IFLA_IPTUN_FMRS], rem) {
++ struct nlattr *fmrd[IFLA_IPTUN_FMR_MAX + 1], *c;
++ struct __ip6_tnl_fmr *nfmr;
++
++ nla_parse_nested(fmrd, IFLA_IPTUN_FMR_MAX,
++ fmr, ip6_tnl_fmr_policy, NULL);
++
++ if (!(nfmr = kzalloc(sizeof(*nfmr), GFP_KERNEL)))
++ continue;
++
++ nfmr->offset = 6;
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX]))
++ nla_memcpy(&nfmr->ip6_prefix, fmrd[IFLA_IPTUN_FMR_IP6_PREFIX],
++ sizeof(nfmr->ip6_prefix));
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX]))
++ nla_memcpy(&nfmr->ip4_prefix, fmrd[IFLA_IPTUN_FMR_IP4_PREFIX],
++ sizeof(nfmr->ip4_prefix));
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX_LEN]))
++ nfmr->ip6_prefix_len = nla_get_u8(c);
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX_LEN]))
++ nfmr->ip4_prefix_len = nla_get_u8(c);
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_EA_LEN]))
++ nfmr->ea_len = nla_get_u8(c);
++
++ if ((c = fmrd[IFLA_IPTUN_FMR_OFFSET]))
++ nfmr->offset = nla_get_u8(c);
++
++ nfmr->next = parms->fmrs;
++ parms->fmrs = nfmr;
++ }
++ }
+ }
+
+ static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
+@@ -2131,6 +2350,12 @@ static void ip6_tnl_dellink(struct net_d
+
+ static size_t ip6_tnl_get_size(const struct net_device *dev)
+ {
++ const struct ip6_tnl *t = netdev_priv(dev);
++ struct __ip6_tnl_fmr *c;
++ int fmrs = 0;
++ for (c = t->parms.fmrs; c; c = c->next)
++ ++fmrs;
++
+ return
+ /* IFLA_IPTUN_LINK */
+ nla_total_size(4) +
+@@ -2160,6 +2385,24 @@ static size_t ip6_tnl_get_size(const str
+ nla_total_size(0) +
+ /* IFLA_IPTUN_FWMARK */
+ nla_total_size(4) +
++ /* IFLA_IPTUN_FMRS */
++ nla_total_size(0) +
++ (
++ /* nest */
++ nla_total_size(0) +
++ /* IFLA_IPTUN_FMR_IP6_PREFIX */
++ nla_total_size(sizeof(struct in6_addr)) +
++ /* IFLA_IPTUN_FMR_IP4_PREFIX */
++ nla_total_size(sizeof(struct in_addr)) +
++ /* IFLA_IPTUN_FMR_EA_LEN */
++ nla_total_size(1) +
++ /* IFLA_IPTUN_FMR_IP6_PREFIX_LEN */
++ nla_total_size(1) +
++ /* IFLA_IPTUN_FMR_IP4_PREFIX_LEN */
++ nla_total_size(1) +
++ /* IFLA_IPTUN_FMR_OFFSET */
++ nla_total_size(1)
++ ) * fmrs +
+ 0;
+ }
+
+@@ -2167,6 +2410,9 @@ static int ip6_tnl_fill_info(struct sk_b
+ {
+ struct ip6_tnl *tunnel = netdev_priv(dev);
+ struct __ip6_tnl_parm *parm = &tunnel->parms;
++ struct __ip6_tnl_fmr *c;
++ int fmrcnt = 0;
++ struct nlattr *fmrs;
+
+ if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
+ nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
+@@ -2176,9 +2422,27 @@ static int ip6_tnl_fill_info(struct sk_b
+ nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
+ nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
+ nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
+- nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
++ nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark) ||
++ !(fmrs = nla_nest_start(skb, IFLA_IPTUN_FMRS)))
+ goto nla_put_failure;
+
++ for (c = parm->fmrs; c; c = c->next) {
++ struct nlattr *fmr = nla_nest_start(skb, ++fmrcnt);
++ if (!fmr ||
++ nla_put(skb, IFLA_IPTUN_FMR_IP6_PREFIX,
++ sizeof(c->ip6_prefix), &c->ip6_prefix) ||
++ nla_put(skb, IFLA_IPTUN_FMR_IP4_PREFIX,
++ sizeof(c->ip4_prefix), &c->ip4_prefix) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_IP6_PREFIX_LEN, c->ip6_prefix_len) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_IP4_PREFIX_LEN, c->ip4_prefix_len) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_EA_LEN, c->ea_len) ||
++ nla_put_u8(skb, IFLA_IPTUN_FMR_OFFSET, c->offset))
++ goto nla_put_failure;
++
++ nla_nest_end(skb, fmr);
++ }
++ nla_nest_end(skb, fmrs);
++
+ if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
+@@ -2218,6 +2482,7 @@ static const struct nla_policy ip6_tnl_p
+ [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
+ [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
++ [IFLA_IPTUN_FMRS] = { .type = NLA_NESTED },
+ };
+
+ static struct rtnl_link_ops ip6_link_ops __read_mostly = {
diff --git a/target/linux/generic/pending-5.10/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch b/target/linux/generic/pending-5.10/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch
new file mode 100644
index 0000000000..fda982fce9
--- /dev/null
+++ b/target/linux/generic/pending-5.10/670-ipv6-allow-rejecting-with-source-address-failed-policy.patch
@@ -0,0 +1,263 @@
+From: Jonas Gorski <jogo@openwrt.org>
+Subject: ipv6: allow rejecting with "source address failed policy"
+
+RFC6204 L-14 requires rejecting traffic from invalid addresses with
+ICMPv6 Destination Unreachable, Code 5 (Source address failed ingress/
+egress policy) on the LAN side, so add an appropriate rule for that.
+
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+---
+ include/net/netns/ipv6.h | 1 +
+ include/uapi/linux/fib_rules.h | 4 +++
+ include/uapi/linux/rtnetlink.h | 1 +
+ net/ipv4/fib_semantics.c | 4 +++
+ net/ipv4/fib_trie.c | 1 +
+ net/ipv4/ipmr.c | 1 +
+ net/ipv6/fib6_rules.c | 4 +++
+ net/ipv6/ip6mr.c | 2 ++
+ net/ipv6/route.c | 58 +++++++++++++++++++++++++++++++++++++++++-
+ 9 files changed, 75 insertions(+), 1 deletion(-)
+
+--- a/include/net/netns/ipv6.h
++++ b/include/net/netns/ipv6.h
+@@ -87,6 +87,7 @@ struct netns_ipv6 {
+ unsigned int fib6_routes_require_src;
+ #endif
+ struct rt6_info *ip6_prohibit_entry;
++ struct rt6_info *ip6_policy_failed_entry;
+ struct rt6_info *ip6_blk_hole_entry;
+ struct fib6_table *fib6_local_tbl;
+ struct fib_rules_ops *fib6_rules_ops;
+--- a/include/uapi/linux/fib_rules.h
++++ b/include/uapi/linux/fib_rules.h
+@@ -82,6 +82,10 @@ enum {
+ FR_ACT_BLACKHOLE, /* Drop without notification */
+ FR_ACT_UNREACHABLE, /* Drop with ENETUNREACH */
+ FR_ACT_PROHIBIT, /* Drop with EACCES */
++ FR_ACT_RES9,
++ FR_ACT_RES10,
++ FR_ACT_RES11,
++ FR_ACT_POLICY_FAILED, /* Drop with EACCES */
+ __FR_ACT_MAX,
+ };
+
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -249,6 +249,7 @@ enum {
+ RTN_THROW, /* Not in this table */
+ RTN_NAT, /* Translate this address */
+ RTN_XRESOLVE, /* Use external resolver */
++ RTN_POLICY_FAILED, /* Failed ingress/egress policy */
+ __RTN_MAX
+ };
+
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -141,6 +141,10 @@ const struct fib_prop fib_props[RTN_MAX
+ .error = -EINVAL,
+ .scope = RT_SCOPE_NOWHERE,
+ },
++ [RTN_POLICY_FAILED] = {
++ .error = -EACCES,
++ .scope = RT_SCOPE_UNIVERSE,
++ },
+ };
+
+ static void rt_fibinfo_free(struct rtable __rcu **rtp)
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -2734,6 +2734,7 @@ static const char *const rtn_type_names[
+ [RTN_THROW] = "THROW",
+ [RTN_NAT] = "NAT",
+ [RTN_XRESOLVE] = "XRESOLVE",
++ [RTN_POLICY_FAILED] = "POLICY_FAILED",
+ };
+
+ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -175,6 +175,7 @@ static int ipmr_rule_action(struct fib_r
+ case FR_ACT_UNREACHABLE:
+ return -ENETUNREACH;
+ case FR_ACT_PROHIBIT:
++ case FR_ACT_POLICY_FAILED:
+ return -EACCES;
+ case FR_ACT_BLACKHOLE:
+ default:
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -220,6 +220,10 @@ static int __fib6_rule_action(struct fib
+ err = -EACCES;
+ rt = net->ipv6.ip6_prohibit_entry;
+ goto discard_pkt;
++ case FR_ACT_POLICY_FAILED:
++ err = -EACCES;
++ rt = net->ipv6.ip6_policy_failed_entry;
++ goto discard_pkt;
+ }
+
+ tb_id = fib_rule_get_table(rule, arg);
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -163,6 +163,8 @@ static int ip6mr_rule_action(struct fib_
+ return -ENETUNREACH;
+ case FR_ACT_PROHIBIT:
+ return -EACCES;
++ case FR_ACT_POLICY_FAILED:
++ return -EACCES;
+ case FR_ACT_BLACKHOLE:
+ default:
+ return -EINVAL;
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -94,6 +94,8 @@ static int ip6_pkt_discard(struct sk_bu
+ static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+ static int ip6_pkt_prohibit(struct sk_buff *skb);
+ static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
++static int ip6_pkt_policy_failed(struct sk_buff *skb);
++static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+ static void ip6_link_failure(struct sk_buff *skb);
+ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu,
+@@ -327,6 +329,18 @@ static const struct rt6_info ip6_prohibi
+ .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
+ };
+
++static const struct rt6_info ip6_policy_failed_entry_template = {
++ .dst = {
++ .__refcnt = ATOMIC_INIT(1),
++ .__use = 1,
++ .obsolete = DST_OBSOLETE_FORCE_CHK,
++ .error = -EACCES,
++ .input = ip6_pkt_policy_failed,
++ .output = ip6_pkt_policy_failed_out,
++ },
++ .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
++};
++
+ static const struct rt6_info ip6_blk_hole_entry_template = {
+ .dst = {
+ .__refcnt = ATOMIC_INIT(1),
+@@ -1048,6 +1062,7 @@ static const int fib6_prop[RTN_MAX + 1]
+ [RTN_BLACKHOLE] = -EINVAL,
+ [RTN_UNREACHABLE] = -EHOSTUNREACH,
+ [RTN_PROHIBIT] = -EACCES,
++ [RTN_POLICY_FAILED] = -EACCES,
+ [RTN_THROW] = -EAGAIN,
+ [RTN_NAT] = -EINVAL,
+ [RTN_XRESOLVE] = -EINVAL,
+@@ -1083,6 +1098,10 @@ static void ip6_rt_init_dst_reject(struc
+ rt->dst.output = ip6_pkt_prohibit_out;
+ rt->dst.input = ip6_pkt_prohibit;
+ break;
++ case RTN_POLICY_FAILED:
++ rt->dst.output = ip6_pkt_policy_failed_out;
++ rt->dst.input = ip6_pkt_policy_failed;
++ break;
+ case RTN_THROW:
+ case RTN_UNREACHABLE:
+ default:
+@@ -4432,6 +4451,17 @@ static int ip6_pkt_prohibit_out(struct n
+ return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
+ }
+
++static int ip6_pkt_policy_failed(struct sk_buff *skb)
++{
++ return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_INNOROUTES);
++}
++
++static int ip6_pkt_policy_failed_out(struct net *net, struct sock *sk, struct sk_buff *skb)
++{
++ skb->dev = skb_dst(skb)->dev;
++ return ip6_pkt_drop(skb, ICMPV6_POLICY_FAIL, IPSTATS_MIB_OUTNOROUTES);
++}
++
+ /*
+ * Allocate a dst for local (unicast / anycast) address.
+ */
+@@ -4912,7 +4942,8 @@ static int rtm_to_fib6_config(struct sk_
+ if (rtm->rtm_type == RTN_UNREACHABLE ||
+ rtm->rtm_type == RTN_BLACKHOLE ||
+ rtm->rtm_type == RTN_PROHIBIT ||
+- rtm->rtm_type == RTN_THROW)
++ rtm->rtm_type == RTN_THROW ||
++ rtm->rtm_type == RTN_POLICY_FAILED)
+ cfg->fc_flags |= RTF_REJECT;
+
+ if (rtm->rtm_type == RTN_LOCAL)
+@@ -6080,6 +6111,8 @@ static int ip6_route_dev_notify(struct n
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ net->ipv6.ip6_prohibit_entry->dst.dev = dev;
+ net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
++ net->ipv6.ip6_policy_failed_entry->dst.dev = dev;
++ net->ipv6.ip6_policy_failed_entry->rt6i_idev = in6_dev_get(dev);
+ net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
+ net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
+ #endif
+@@ -6091,6 +6124,7 @@ static int ip6_route_dev_notify(struct n
+ in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
++ in6_dev_put_clear(&net->ipv6.ip6_policy_failed_entry->rt6i_idev);
+ in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+ #endif
+ }
+@@ -6282,6 +6316,8 @@ static int __net_init ip6_route_net_init
+
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ net->ipv6.fib6_has_custom_rules = false;
++
++
+ net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
+ sizeof(*net->ipv6.ip6_prohibit_entry),
+ GFP_KERNEL);
+@@ -6292,11 +6328,21 @@ static int __net_init ip6_route_net_init
+ ip6_template_metrics, true);
+ INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);
+
++ net->ipv6.ip6_policy_failed_entry =
++ kmemdup(&ip6_policy_failed_entry_template,
++ sizeof(*net->ipv6.ip6_policy_failed_entry), GFP_KERNEL);
++ if (!net->ipv6.ip6_policy_failed_entry)
++ goto out_ip6_prohibit_entry;
++ net->ipv6.ip6_policy_failed_entry->dst.ops = &net->ipv6.ip6_dst_ops;
++ dst_init_metrics(&net->ipv6.ip6_policy_failed_entry->dst,
++ ip6_template_metrics, true);
++ INIT_LIST_HEAD(&net->ipv6.ip6_policy_failed_entry->rt6i_uncached);
++
+ net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
+ sizeof(*net->ipv6.ip6_blk_hole_entry),
+ GFP_KERNEL);
+ if (!net->ipv6.ip6_blk_hole_entry)
+- goto out_ip6_prohibit_entry;
++ goto out_ip6_policy_failed_entry;
+ net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+ dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
+ ip6_template_metrics, true);
+@@ -6323,6 +6369,8 @@ out:
+ return ret;
+
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
++out_ip6_policy_failed_entry:
++ kfree(net->ipv6.ip6_policy_failed_entry);
+ out_ip6_prohibit_entry:
+ kfree(net->ipv6.ip6_prohibit_entry);
+ out_ip6_null_entry:
+@@ -6342,6 +6390,7 @@ static void __net_exit ip6_route_net_exi
+ kfree(net->ipv6.ip6_null_entry);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ kfree(net->ipv6.ip6_prohibit_entry);
++ kfree(net->ipv6.ip6_policy_failed_entry);
+ kfree(net->ipv6.ip6_blk_hole_entry);
+ #endif
+ dst_entries_destroy(&net->ipv6.ip6_dst_ops);
+@@ -6419,6 +6468,9 @@ void __init ip6_route_init_special_entri
+ init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
++ init_net.ipv6.ip6_policy_failed_entry->dst.dev = init_net.loopback_dev;
++ init_net.ipv6.ip6_policy_failed_entry->rt6i_idev =
++ in6_dev_get(init_net.loopback_dev);
+ #endif
+ }
+
diff --git a/target/linux/generic/pending-5.10/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch b/target/linux/generic/pending-5.10/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch
new file mode 100644
index 0000000000..0e7f35db89
--- /dev/null
+++ b/target/linux/generic/pending-5.10/671-net-provide-defines-for-_POLICY_FAILED-until-all-cod.patch
@@ -0,0 +1,50 @@
+From: Jonas Gorski <jogo@openwrt.org>
+Subject: net: provide defines for _POLICY_FAILED until all code is updated
+
+Upstream introduced ICMPV6_POLICY_FAIL for code 5 of destination
+unreachable, conflicting with our name.
+
+Add appropriate defines to allow our code to build with the new
+name until we have updated our local patches for older kernels
+and userspace packages.
+
+Signed-off-by: Jonas Gorski <jogo@openwrt.org>
+---
+ include/uapi/linux/fib_rules.h | 2 ++
+ include/uapi/linux/icmpv6.h | 2 ++
+ include/uapi/linux/rtnetlink.h | 2 ++
+ 3 files changed, 6 insertions(+)
+
+--- a/include/uapi/linux/fib_rules.h
++++ b/include/uapi/linux/fib_rules.h
+@@ -89,6 +89,8 @@ enum {
+ __FR_ACT_MAX,
+ };
+
++#define FR_ACT_FAILED_POLICY FR_ACT_POLICY_FAILED
++
+ #define FR_ACT_MAX (__FR_ACT_MAX - 1)
+
+ #endif
+--- a/include/uapi/linux/icmpv6.h
++++ b/include/uapi/linux/icmpv6.h
+@@ -126,6 +126,8 @@ struct icmp6hdr {
+ #define ICMPV6_POLICY_FAIL 5
+ #define ICMPV6_REJECT_ROUTE 6
+
++#define ICMPV6_FAILED_POLICY ICMPV6_POLICY_FAIL
++
+ /*
+ * Codes for Time Exceeded
+ */
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -253,6 +253,8 @@ enum {
+ __RTN_MAX
+ };
+
++#define RTN_FAILED_POLICY RTN_POLICY_FAILED
++
+ #define RTN_MAX (__RTN_MAX - 1)
+
+
diff --git a/target/linux/generic/pending-5.10/680-NET-skip-GRO-for-foreign-MAC-addresses.patch b/target/linux/generic/pending-5.10/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
new file mode 100644
index 0000000000..cd1b30b4af
--- /dev/null
+++ b/target/linux/generic/pending-5.10/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
@@ -0,0 +1,149 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: net: replace GRO optimization patch with a new one that supports VLANs/bridges with different MAC addresses
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ include/linux/netdevice.h | 2 ++
+ include/linux/skbuff.h | 3 ++-
+ net/core/dev.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++
+ net/ethernet/eth.c | 18 +++++++++++++++++-
+ 4 files changed, 69 insertions(+), 2 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2031,6 +2031,8 @@ struct net_device {
+ struct netdev_hw_addr_list mc;
+ struct netdev_hw_addr_list dev_addrs;
+
++ unsigned char local_addr_mask[MAX_ADDR_LEN];
++
+ #ifdef CONFIG_SYSFS
+ struct kset *queues_kset;
+ #endif
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -858,6 +858,7 @@ struct sk_buff {
+ #ifdef CONFIG_TLS_DEVICE
+ __u8 decrypted:1;
+ #endif
++ __u8 gro_skip:1;
+
+ #ifdef CONFIG_NET_SCHED
+ __u16 tc_index; /* traffic control index */
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5962,6 +5962,9 @@ static enum gro_result dev_gro_receive(s
+ int same_flow;
+ int grow;
+
++ if (skb->gro_skip)
++ goto normal;
++
+ if (netif_elide_gro(skb->dev))
+ goto normal;
+
+@@ -7790,6 +7793,48 @@ static void __netdev_adjacent_dev_unlink
+ &upper_dev->adj_list.lower);
+ }
+
++static void __netdev_addr_mask(unsigned char *mask, const unsigned char *addr,
++ struct net_device *dev)
++{
++ int i;
++
++ for (i = 0; i < dev->addr_len; i++)
++ mask[i] |= addr[i] ^ dev->dev_addr[i];
++}
++
++static void __netdev_upper_mask(unsigned char *mask, struct net_device *dev,
++ struct net_device *lower)
++{
++ struct net_device *cur;
++ struct list_head *iter;
++
++ netdev_for_each_upper_dev_rcu(dev, cur, iter) {
++ __netdev_addr_mask(mask, cur->dev_addr, lower);
++ __netdev_upper_mask(mask, cur, lower);
++ }
++}
++
++static void __netdev_update_addr_mask(struct net_device *dev)
++{
++ unsigned char mask[MAX_ADDR_LEN];
++ struct net_device *cur;
++ struct list_head *iter;
++
++ memset(mask, 0, sizeof(mask));
++ __netdev_upper_mask(mask, dev, dev);
++ memcpy(dev->local_addr_mask, mask, dev->addr_len);
++
++ netdev_for_each_lower_dev(dev, cur, iter)
++ __netdev_update_addr_mask(cur);
++}
++
++static void netdev_update_addr_mask(struct net_device *dev)
++{
++ rcu_read_lock();
++ __netdev_update_addr_mask(dev);
++ rcu_read_unlock();
++}
++
+ static int __netdev_upper_dev_link(struct net_device *dev,
+ struct net_device *upper_dev, bool master,
+ void *upper_priv, void *upper_info,
+@@ -7841,6 +7886,7 @@ static int __netdev_upper_dev_link(struc
+ if (ret)
+ return ret;
+
++ netdev_update_addr_mask(dev);
+ ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
+ &changeupper_info.info);
+ ret = notifier_to_errno(ret);
+@@ -7937,6 +7983,7 @@ static void __netdev_upper_dev_unlink(st
+
+ __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
+
++ netdev_update_addr_mask(dev);
+ call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
+ &changeupper_info.info);
+
+@@ -8723,6 +8770,7 @@ int dev_set_mac_address(struct net_devic
+ if (err)
+ return err;
+ dev->addr_assign_type = NET_ADDR_SET;
++ netdev_update_addr_mask(dev);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
+ return 0;
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -143,6 +143,18 @@ u32 eth_get_headlen(const struct net_dev
+ }
+ EXPORT_SYMBOL(eth_get_headlen);
+
++static inline bool
++eth_check_local_mask(const void *addr1, const void *addr2, const void *mask)
++{
++ const u16 *a1 = addr1;
++ const u16 *a2 = addr2;
++ const u16 *m = mask;
++
++ return (((a1[0] ^ a2[0]) & ~m[0]) |
++ ((a1[1] ^ a2[1]) & ~m[1]) |
++ ((a1[2] ^ a2[2]) & ~m[2]));
++}
++
+ /**
+ * eth_type_trans - determine the packet's protocol ID.
+ * @skb: received socket data
+@@ -174,6 +186,10 @@ __be16 eth_type_trans(struct sk_buff *sk
+ } else {
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
++
++ if (eth_check_local_mask(eth->h_dest, dev->dev_addr,
++ dev->local_addr_mask))
++ skb->gro_skip = 1;
+ }
+
+ /*
diff --git a/target/linux/generic/pending-5.10/681-NET-add-of_get_mac_address_mtd.patch b/target/linux/generic/pending-5.10/681-NET-add-of_get_mac_address_mtd.patch
new file mode 100644
index 0000000000..71fbfe9978
--- /dev/null
+++ b/target/linux/generic/pending-5.10/681-NET-add-of_get_mac_address_mtd.patch
@@ -0,0 +1,135 @@
+From: John Crispin <blogic@openwrt.org>
+Subject: NET: add mtd-mac-address support to of_get_mac_address()
+
+Many embedded devices have information such as mac addresses stored inside mtd
+devices. This patch allows us to add a property inside a node describing a
+network interface. The new property points at an mtd partition with an offset
+where the mac address can be found.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/of/of_net.c | 37 +++++++++++++++++++++++++++++++++++++
+ include/linux/of_net.h | 1 +
+ 2 files changed, 38 insertions(+)
+
+--- a/drivers/of/of_net.c
++++ b/drivers/of/of_net.c
+@@ -11,6 +11,7 @@
+ #include <linux/phy.h>
+ #include <linux/export.h>
+ #include <linux/device.h>
++#include <linux/mtd/mtd.h>
+
+ /**
+ * of_get_phy_mode - Get phy mode for given device_node
+@@ -45,7 +46,7 @@ int of_get_phy_mode(struct device_node *
+ }
+ EXPORT_SYMBOL_GPL(of_get_phy_mode);
+
+-static const void *of_get_mac_addr(struct device_node *np, const char *name)
++static void *of_get_mac_addr(struct device_node *np, const char *name)
+ {
+ struct property *pp = of_find_property(np, name, NULL);
+
+@@ -78,6 +79,79 @@ static const void *of_get_mac_addr_nvmem
+ return mac;
+ }
+
++static const void *of_get_mac_address_mtd(struct device_node *np)
++{
++#ifdef CONFIG_MTD
++ struct device_node *mtd_np = NULL;
++ struct property *prop;
++ size_t retlen;
++ int size, ret;
++ struct mtd_info *mtd;
++ const char *part;
++ const __be32 *list;
++ phandle phandle;
++ u32 mac_inc = 0;
++ u8 mac[ETH_ALEN];
++ void *addr;
++ u32 inc_idx;
++
++ list = of_get_property(np, "mtd-mac-address", &size);
++ if (!list || (size != (2 * sizeof(*list))))
++ return NULL;
++
++ phandle = be32_to_cpup(list++);
++ if (phandle)
++ mtd_np = of_find_node_by_phandle(phandle);
++
++ if (!mtd_np)
++ return NULL;
++
++ part = of_get_property(mtd_np, "label", NULL);
++ if (!part)
++ part = mtd_np->name;
++
++ mtd = get_mtd_device_nm(part);
++ if (IS_ERR(mtd))
++ return NULL;
++
++ ret = mtd_read(mtd, be32_to_cpup(list), 6, &retlen, mac);
++ put_mtd_device(mtd);
++
++ if (of_property_read_u32(np, "mtd-mac-address-increment-byte", &inc_idx))
++ inc_idx = 5;
++ if (inc_idx > 5)
++ return NULL;
++
++ if (!of_property_read_u32(np, "mtd-mac-address-increment", &mac_inc))
++ mac[inc_idx] += mac_inc;
++
++ if (!is_valid_ether_addr(mac))
++ return NULL;
++
++ addr = of_get_mac_addr(np, "mac-address");
++ if (addr) {
++ memcpy(addr, mac, ETH_ALEN);
++ return addr;
++ }
++
++ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
++ if (!prop)
++ return NULL;
++
++ prop->name = "mac-address";
++ prop->length = ETH_ALEN;
++ prop->value = kmemdup(mac, ETH_ALEN, GFP_KERNEL);
++ if (!prop->value || of_add_property(np, prop))
++ goto free;
++
++ return prop->value;
++free:
++ kfree(prop->value);
++ kfree(prop);
++#endif
++ return NULL;
++}
++
+ /**
+ * Search the device tree for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain to "most recent" MAC
+@@ -98,12 +172,20 @@ static const void *of_get_mac_addr_nvmem
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
+ *
++ *
++ * If an mtd-mac-address property exists, try to fetch the MAC address from the
++ * specified mtd device, and store it as a 'mac-address' property.
++ *
+ * Return: Will be a valid pointer on success and ERR_PTR in case of error.
+ */
+ const void *of_get_mac_address(struct device_node *np)
+ {
+ const void *addr;
+
++ addr = of_get_mac_address_mtd(np);
++ if (addr)
++ return addr;
++
+ addr = of_get_mac_addr(np, "mac-address");
+ if (addr)
+ return addr;
diff --git a/target/linux/generic/pending-5.10/690-net-add-support-for-threaded-NAPI-polling.patch b/target/linux/generic/pending-5.10/690-net-add-support-for-threaded-NAPI-polling.patch
new file mode 100644
index 0000000000..79b7832f2a
--- /dev/null
+++ b/target/linux/generic/pending-5.10/690-net-add-support-for-threaded-NAPI-polling.patch
@@ -0,0 +1,284 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sun, 26 Jul 2020 14:03:21 +0200
+Subject: [PATCH] net: add support for threaded NAPI polling
+
+For some drivers (especially 802.11 drivers), doing a lot of work in the NAPI
+poll function does not perform well. Since NAPI poll is bound to the CPU it
+was scheduled from, we can easily end up with a few very busy CPUs spending
+most of their time in softirq/ksoftirqd and some idle ones.
+
+Introduce threaded NAPI for such drivers based on a workqueue. The API is the
+same except for using netif_threaded_napi_add instead of netif_napi_add.
+
+In my tests with mt76 on MT7621, using threaded NAPI + a thread for tx scheduling
+improves LAN->WLAN bridging throughput by 10-50%. Throughput without threaded
+NAPI is wildly inconsistent, depending on the CPU that runs the tx scheduling
+thread.
+
+With threaded NAPI, throughput seems stable and consistent (and higher than
+the best results I got without it).
+
+Based on a patch by Hillf Danton
+
+Cc: Hillf Danton <hdanton@sina.com>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -347,6 +347,7 @@ struct napi_struct {
+ struct list_head dev_list;
+ struct hlist_node napi_hash_node;
+ unsigned int napi_id;
++ struct work_struct work;
+ };
+
+ enum {
+@@ -357,6 +358,7 @@ enum {
+ NAPI_STATE_LISTED, /* NAPI added to system lists */
+ NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
+ NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
++ NAPI_STATE_THREADED, /* Use threaded NAPI */
+ };
+
+ enum {
+@@ -367,6 +369,7 @@ enum {
+ NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
+ NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
+ NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
++ NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
+ };
+
+ enum gro_result {
+@@ -2411,6 +2414,26 @@ void netif_napi_add(struct net_device *d
+ int (*poll)(struct napi_struct *, int), int weight);
+
+ /**
++ * netif_threaded_napi_add - initialize a NAPI context
++ * @dev: network device
++ * @napi: NAPI context
++ * @poll: polling function
++ * @weight: default weight
++ *
++ * This variant of netif_napi_add() should be used from drivers using NAPI
++ * with CPU intensive poll functions.
++ * This will schedule polling from a high priority workqueue
++ */
++static inline void netif_threaded_napi_add(struct net_device *dev,
++ struct napi_struct *napi,
++ int (*poll)(struct napi_struct *, int),
++ int weight)
++{
++ set_bit(NAPI_STATE_THREADED, &napi->state);
++ netif_napi_add(dev, napi, poll, weight);
++}
++
++/**
+ * netif_tx_napi_add - initialize a NAPI context
+ * @dev: network device
+ * @napi: NAPI context
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -159,6 +159,7 @@ static DEFINE_SPINLOCK(offload_lock);
+ struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+ struct list_head ptype_all __read_mostly; /* Taps */
+ static struct list_head offload_base __read_mostly;
++static struct workqueue_struct *napi_workq __read_mostly;
+
+ static int netif_rx_internal(struct sk_buff *skb);
+ static int call_netdevice_notifiers_info(unsigned long val,
+@@ -6404,6 +6405,11 @@ void __napi_schedule(struct napi_struct
+ {
+ unsigned long flags;
+
++ if (test_bit(NAPI_STATE_THREADED, &n->state)) {
++ queue_work(napi_workq, &n->work);
++ return;
++ }
++
+ local_irq_save(flags);
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ local_irq_restore(flags);
+@@ -6451,6 +6457,11 @@ EXPORT_SYMBOL(napi_schedule_prep);
+ */
+ void __napi_schedule_irqoff(struct napi_struct *n)
+ {
++ if (test_bit(NAPI_STATE_THREADED, &n->state)) {
++ queue_work(napi_workq, &n->work);
++ return;
++ }
++
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ }
+ EXPORT_SYMBOL(__napi_schedule_irqoff);
+@@ -6712,6 +6723,86 @@ static void init_gro_hash(struct napi_st
+ napi->gro_bitmask = 0;
+ }
+
++static int __napi_poll(struct napi_struct *n, bool *repoll)
++{
++ int work, weight;
++
++ weight = n->weight;
++
++ /* This NAPI_STATE_SCHED test is for avoiding a race
++ * with netpoll's poll_napi(). Only the entity which
++ * obtains the lock and sees NAPI_STATE_SCHED set will
++ * actually make the ->poll() call. Therefore we avoid
++ * accidentally calling ->poll() when NAPI is not scheduled.
++ */
++ work = 0;
++ if (test_bit(NAPI_STATE_SCHED, &n->state)) {
++ work = n->poll(n, weight);
++ trace_napi_poll(n, work, weight);
++ }
++
++ if (unlikely(work > weight))
++ pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
++ n->poll, work, weight);
++
++ if (likely(work < weight))
++ return work;
++
++ /* Drivers must not modify the NAPI state if they
++ * consume the entire weight. In such cases this code
++ * still "owns" the NAPI instance and therefore can
++ * move the instance around on the list at-will.
++ */
++ if (unlikely(napi_disable_pending(n))) {
++ napi_complete(n);
++ return work;
++ }
++
++ if (n->gro_bitmask) {
++ /* flush too old packets
++ * If HZ < 1000, flush all packets.
++ */
++ napi_gro_flush(n, HZ >= 1000);
++ }
++
++ gro_normal_list(n);
++
++ *repoll = true;
++
++ return work;
++}
++
++static void napi_workfn(struct work_struct *work)
++{
++ struct napi_struct *n = container_of(work, struct napi_struct, work);
++ void *have;
++
++ for (;;) {
++ bool repoll = false;
++
++ local_bh_disable();
++
++ have = netpoll_poll_lock(n);
++ __napi_poll(n, &repoll);
++ netpoll_poll_unlock(have);
++
++ local_bh_enable();
++
++ if (!repoll)
++ return;
++
++ if (!need_resched())
++ continue;
++
++ /*
++ * have to pay the latency of a task switch even if
++ * napi is still scheduled
++ */
++ queue_work(napi_workq, work);
++ return;
++ }
++}
++
+ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+ {
+@@ -6735,6 +6826,7 @@ void netif_napi_add(struct net_device *d
+ #ifdef CONFIG_NETPOLL
+ napi->poll_owner = -1;
+ #endif
++ INIT_WORK(&napi->work, napi_workfn);
+ set_bit(NAPI_STATE_SCHED, &napi->state);
+ set_bit(NAPI_STATE_NPSVC, &napi->state);
+ list_add_rcu(&napi->dev_list, &dev->napi_list);
+@@ -6777,6 +6869,7 @@ void __netif_napi_del(struct napi_struct
+ if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
+ return;
+
++ cancel_work_sync(&napi->work);
+ napi_hash_del(napi);
+ list_del_rcu(&napi->dev_list);
+ napi_free_frags(napi);
+@@ -6788,52 +6881,18 @@ EXPORT_SYMBOL(__netif_napi_del);
+
+ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+ {
++ bool do_repoll = false;
+ void *have;
+- int work, weight;
++ int work;
+
+ list_del_init(&n->poll_list);
+
+ have = netpoll_poll_lock(n);
+
+- weight = n->weight;
+-
+- /* This NAPI_STATE_SCHED test is for avoiding a race
+- * with netpoll's poll_napi(). Only the entity which
+- * obtains the lock and sees NAPI_STATE_SCHED set will
+- * actually make the ->poll() call. Therefore we avoid
+- * accidentally calling ->poll() when NAPI is not scheduled.
+- */
+- work = 0;
+- if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+- work = n->poll(n, weight);
+- trace_napi_poll(n, work, weight);
+- }
+-
+- if (unlikely(work > weight))
+- pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
+- n->poll, work, weight);
+-
+- if (likely(work < weight))
+- goto out_unlock;
++ work = __napi_poll(n, &do_repoll);
+
+- /* Drivers must not modify the NAPI state if they
+- * consume the entire weight. In such cases this code
+- * still "owns" the NAPI instance and therefore can
+- * move the instance around on the list at-will.
+- */
+- if (unlikely(napi_disable_pending(n))) {
+- napi_complete(n);
++ if (!do_repoll)
+ goto out_unlock;
+- }
+-
+- if (n->gro_bitmask) {
+- /* flush too old packets
+- * If HZ < 1000, flush all packets.
+- */
+- napi_gro_flush(n, HZ >= 1000);
+- }
+-
+- gro_normal_list(n);
+
+ /* Some drivers may have called napi_schedule
+ * prior to exhausting their budget.
+@@ -11288,6 +11347,10 @@ static int __init net_dev_init(void)
+ sd->backlog.weight = weight_p;
+ }
+
++ napi_workq = alloc_workqueue("napi_workq", WQ_UNBOUND | WQ_HIGHPRI,
++ WQ_UNBOUND_MAX_ACTIVE | WQ_SYSFS);
++ BUG_ON(!napi_workq);
++
+ dev_boot_phase = 0;
+
+ /* The loopback device is special if any other network devices
diff --git a/target/linux/generic/pending-5.10/691-net-add-sysfs-attribute-for-enabling-threaded-NAPI.patch b/target/linux/generic/pending-5.10/691-net-add-sysfs-attribute-for-enabling-threaded-NAPI.patch
new file mode 100644
index 0000000000..279ae2c25a
--- /dev/null
+++ b/target/linux/generic/pending-5.10/691-net-add-sysfs-attribute-for-enabling-threaded-NAPI.patch
@@ -0,0 +1,74 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 21 Aug 2020 15:07:54 +0200
+Subject: [PATCH] net: add sysfs attribute for enabling threaded NAPI
+
+This can be used to enable threaded NAPI on drivers that did not explicitly
+request it.
+
+Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -472,6 +472,52 @@ static ssize_t proto_down_store(struct d
+ }
+ NETDEVICE_SHOW_RW(proto_down, fmt_dec);
+
++static int change_napi_threaded(struct net_device *dev, unsigned long val)
++{
++ struct napi_struct *napi;
++
++ if (list_empty(&dev->napi_list))
++ return -EOPNOTSUPP;
++
++ list_for_each_entry(napi, &dev->napi_list, dev_list) {
++ if (val)
++ set_bit(NAPI_STATE_THREADED, &napi->state);
++ else
++ clear_bit(NAPI_STATE_THREADED, &napi->state);
++ }
++
++ return 0;
++}
++
++static ssize_t napi_threaded_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ return netdev_store(dev, attr, buf, len, change_napi_threaded);
++}
++
++static ssize_t napi_threaded_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct net_device *netdev = to_net_dev(dev);
++ struct napi_struct *napi;
++ bool enabled = false;
++
++ if (!rtnl_trylock())
++ return restart_syscall();
++
++ list_for_each_entry(napi, &netdev->napi_list, dev_list) {
++ if (test_bit(NAPI_STATE_THREADED, &napi->state))
++ enabled = true;
++ }
++
++ rtnl_unlock();
++
++ return sprintf(buf, fmt_dec, enabled);
++}
++static DEVICE_ATTR_RW(napi_threaded);
++
+ static ssize_t phys_port_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+@@ -564,6 +610,7 @@ static struct attribute *net_class_attrs
+ &dev_attr_tx_queue_len.attr,
+ &dev_attr_gro_flush_timeout.attr,
+ &dev_attr_napi_defer_hard_irqs.attr,
++ &dev_attr_napi_threaded.attr,
+ &dev_attr_phys_port_id.attr,
+ &dev_attr_phys_port_name.attr,
+ &dev_attr_phys_switch_id.attr,
diff --git a/target/linux/generic/pending-5.10/703-phy-add-detach-callback-to-struct-phy_driver.patch b/target/linux/generic/pending-5.10/703-phy-add-detach-callback-to-struct-phy_driver.patch
new file mode 100644
index 0000000000..371cffc58b
--- /dev/null
+++ b/target/linux/generic/pending-5.10/703-phy-add-detach-callback-to-struct-phy_driver.patch
@@ -0,0 +1,38 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: generic: add detach callback to struct phy_driver
+
+lede-commit: fe61fc2d7d0b3fb348b502f68f98243b3ddf5867
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/net/phy/phy_device.c | 3 +++
+ include/linux/phy.h | 6 ++++++
+ 2 files changed, 9 insertions(+)
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1663,6 +1663,9 @@ void phy_detach(struct phy_device *phyde
+ struct module *ndev_owner = NULL;
+ struct mii_bus *bus;
+
++ if (phydev->drv && phydev->drv->detach)
++ phydev->drv->detach(phydev);
++
+ if (phydev->sysfs_links) {
+ if (dev)
+ sysfs_remove_link(&dev->dev.kobj, "phydev");
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -759,6 +759,12 @@ struct phy_driver {
+ /** @handle_interrupt: Override default interrupt handling */
+ irqreturn_t (*handle_interrupt)(struct phy_device *phydev);
+
++ /*
++ * Called before an ethernet device is detached
++ * from the PHY.
++ */
++ void (*detach)(struct phy_device *phydev);
++
+ /** @remove: Clears up any memory if needed */
+ void (*remove)(struct phy_device *phydev);
+
diff --git a/target/linux/generic/pending-5.10/735-net-phy-at803x-fix-at8033-sgmii-mode.patch b/target/linux/generic/pending-5.10/735-net-phy-at803x-fix-at8033-sgmii-mode.patch
new file mode 100644
index 0000000000..b063b10251
--- /dev/null
+++ b/target/linux/generic/pending-5.10/735-net-phy-at803x-fix-at8033-sgmii-mode.patch
@@ -0,0 +1,51 @@
+From: Roman Yeryomin <roman@advem.lv>
+Subject: kernel: add at803x fix for sgmii mode
+
+Some (possibly broken) bootloaders incorrectly initialize the at8033
+phy. This patch enables sgmii autonegotiation mode.
+
+[john@phrozen.org: felix added this to his upstream queue]
+
+Signed-off-by: Roman Yeryomin <roman@advem.lv>
+---
+ drivers/net/phy/at803x.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -76,6 +76,7 @@
+ #define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
+ #define AT803X_REG_CHIP_CONFIG 0x1f
+ #define AT803X_BT_BX_REG_SEL 0x8000
++#define AT803X_SGMII_ANEG_EN 0x1000
+
+ #define AT803X_DEBUG_ADDR 0x1D
+ #define AT803X_DEBUG_DATA 0x1E
+@@ -562,6 +563,27 @@ static int at8031_pll_config(struct phy_
+ static int at803x_config_init(struct phy_device *phydev)
+ {
+ int ret;
++ u32 v;
++
++ if (phydev->drv->phy_id == ATH8031_PHY_ID &&
++ phydev->interface == PHY_INTERFACE_MODE_SGMII)
++ {
++ v = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
++ /* select SGMII/fiber page */
++ ret = phy_write(phydev, AT803X_REG_CHIP_CONFIG,
++ v & ~AT803X_BT_BX_REG_SEL);
++ if (ret)
++ return ret;
++ /* enable SGMII autonegotiation */
++ ret = phy_write(phydev, MII_BMCR, AT803X_SGMII_ANEG_EN);
++ if (ret)
++ return ret;
++ /* select copper page */
++ ret = phy_write(phydev, AT803X_REG_CHIP_CONFIG,
++ v | AT803X_BT_BX_REG_SEL);
++ if (ret)
++ return ret;
++ }
+
+ /* The RX and TX delay default is:
+ * after HW reset: RX delay enabled and TX delay disabled
diff --git a/target/linux/generic/pending-5.10/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch b/target/linux/generic/pending-5.10/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch
new file mode 100644
index 0000000000..e6a3d15b79
--- /dev/null
+++ b/target/linux/generic/pending-5.10/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch
@@ -0,0 +1,27 @@
+From a1b291f3f6c80a6c5ccad7283fc472d77a2a4763 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Sun, 22 Dec 2019 12:40:11 +0000
+Subject: [PATCH] net: dsa: mv88e6xxx: fix vlan setup
+
+Provide an option that drivers can set to indicate they want to receive
+vlan configuration even when vlan filtering is disabled. This is safe
+for Marvell DSA bridges, which do not look up ingress traffic in the
+VTU if the port is in 8021Q disabled state. Whether this change is
+suitable for all DSA bridges is not known.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: DENG Qingfang <dqfext@gmail.com>
+---
+ drivers/net/dsa/mv88e6xxx/chip.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -2853,6 +2853,7 @@ static int mv88e6xxx_setup(struct dsa_sw
+
+ chip->ds = ds;
+ ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
++ ds->configure_vlan_while_not_filtering = true;
+
+ mv88e6xxx_reg_lock(chip);
+
diff --git a/target/linux/generic/pending-5.10/761-net-dsa-mt7530-Support-EEE-features.patch b/target/linux/generic/pending-5.10/761-net-dsa-mt7530-Support-EEE-features.patch
new file mode 100644
index 0000000000..3e4413db02
--- /dev/null
+++ b/target/linux/generic/pending-5.10/761-net-dsa-mt7530-Support-EEE-features.patch
@@ -0,0 +1,121 @@
+From 9cfb2d426c38272f245e9e6f62b3552d1ed5852b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ren=C3=A9=20van=20Dorst?= <opensource@vdorst.com>
+Date: Tue, 21 Apr 2020 00:18:08 +0200
+Subject: [PATCH] net: dsa: mt7530: Support EEE features
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: René van Dorst <opensource@vdorst.com>
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -2266,9 +2266,13 @@ static void mt753x_phylink_mac_link_up(s
+ switch (speed) {
+ case SPEED_1000:
+ mcr |= PMCR_FORCE_SPEED_1000;
++ if (priv->eee_enable & BIT(port))
++ mcr |= PMCR_FORCE_EEE1G;
+ break;
+ case SPEED_100:
+ mcr |= PMCR_FORCE_SPEED_100;
++ if (priv->eee_enable & BIT(port))
++ mcr |= PMCR_FORCE_EEE100;
+ break;
+ }
+ if (duplex == DUPLEX_FULL) {
+@@ -2509,6 +2513,54 @@ mt753x_phy_write(struct dsa_switch *ds,
+ return priv->info->phy_write(ds, port, regnum, val);
+ }
+
++static int mt7530_get_mac_eee(struct dsa_switch *ds, int port,
++ struct ethtool_eee *e)
++{
++ struct mt7530_priv *priv = ds->priv;
++ u32 eeecr, pmsr;
++
++ e->eee_enabled = !!(priv->eee_enable & BIT(port));
++
++ if (e->eee_enabled) {
++ eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
++ e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
++ e->tx_lpi_timer = (eeecr >> 4) & 0xFFF;
++ pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
++ e->eee_active = e->eee_enabled && !!(pmsr & PMSR_EEE1G);
++ } else {
++ e->tx_lpi_enabled = 0;
++ e->tx_lpi_timer = 0;
++ e->eee_active = 0;
++ }
++
++ return 0;
++}
++
++static int mt7530_set_mac_eee(struct dsa_switch *ds, int port,
++ struct ethtool_eee *e)
++{
++ struct mt7530_priv *priv = ds->priv;
++ u32 eeecr;
++
++ if (e->tx_lpi_enabled && e->tx_lpi_timer > 0xFFF)
++ return -EINVAL;
++
++ if (e->eee_enabled) {
++ priv->eee_enable |= BIT(port);
++ /* rebuild the LPI configuration, keeping the wakeup-time fields */
++ eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
++ eeecr &= 0xFFFF0000;
++ if (!e->tx_lpi_enabled)
++ eeecr |= LPI_MODE_EN;
++ eeecr |= LPI_THRESH(e->tx_lpi_timer);
++ mt7530_write(priv, MT7530_PMEEECR_P(port), eeecr);
++ } else {
++ priv->eee_enable &= ~(BIT(port));
++ }
++
++ return 0;
++}
++
+ static const struct dsa_switch_ops mt7530_switch_ops = {
+ .get_tag_protocol = mtk_get_tag_protocol,
+ .setup = mt753x_setup,
+@@ -2537,6 +2589,8 @@ static const struct dsa_switch_ops mt753
+ .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
+ .phylink_mac_link_down = mt753x_phylink_mac_link_down,
+ .phylink_mac_link_up = mt753x_phylink_mac_link_up,
++ .get_mac_eee = mt7530_get_mac_eee,
++ .set_mac_eee = mt7530_set_mac_eee,
+ };
+
+ static const struct mt753x_info mt753x_table[] = {
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -240,6 +240,8 @@ enum mt7530_vlan_port_attr {
+ #define PMCR_RX_EN BIT(13)
+ #define PMCR_BACKOFF_EN BIT(9)
+ #define PMCR_BACKPR_EN BIT(8)
++#define PMCR_FORCE_EEE1G BIT(7)
++#define PMCR_FORCE_EEE100 BIT(6)
+ #define PMCR_TX_FC_EN BIT(5)
+ #define PMCR_RX_FC_EN BIT(4)
+ #define PMCR_FORCE_SPEED_1000 BIT(3)
+@@ -289,6 +291,12 @@ enum mt7530_vlan_port_attr {
+ #define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100)
+ #define MT7531_DIS_CLR BIT(31)
+
++#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
++#define WAKEUP_TIME_1000(x) ((x & 0xFF) << 24)
++#define WAKEUP_TIME_100(x) ((x & 0xFF) << 16)
++#define LPI_THRESH(x) ((x & 0xFFF) << 4)
++#define LPI_MODE_EN BIT(0)
++
+ /* Register for MIB */
+ #define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
+ #define MT7530_MIB_CCR 0x4fe0
+@@ -732,6 +740,7 @@ struct mt7530_priv {
+ unsigned int p5_intf_sel;
+ u8 mirror_rx;
+ u8 mirror_tx;
++ u8 eee_enable;
+
+ struct mt7530_port ports[MT7530_NUM_PORTS];
+ /* protect among processes for registers access*/
diff --git a/target/linux/generic/pending-5.10/770-00-net-ethernet-mtk_eth_soc-use-napi_consume_skb.patch b/target/linux/generic/pending-5.10/770-00-net-ethernet-mtk_eth_soc-use-napi_consume_skb.patch
new file mode 100644
index 0000000000..85c859b388
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-00-net-ethernet-mtk_eth_soc-use-napi_consume_skb.patch
@@ -0,0 +1,72 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Mon, 8 Jun 2020 17:01:12 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: use napi_consume_skb
+
+Should improve performance, since napi_consume_skb() can use bulk freeing.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -857,7 +857,8 @@ static int txd_to_idx(struct mtk_tx_ring
+ return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
+ }
+
+-static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
++static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
++ bool napi)
+ {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+@@ -889,8 +890,12 @@ static void mtk_tx_unmap(struct mtk_eth
+
+ tx_buf->flags = 0;
+ if (tx_buf->skb &&
+- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
+- dev_kfree_skb_any(tx_buf->skb);
++ (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
++ if (napi)
++ napi_consume_skb(tx_buf->skb, napi);
++ else
++ dev_kfree_skb_any(tx_buf->skb);
++ }
+ tx_buf->skb = NULL;
+ }
+
+@@ -1068,7 +1073,7 @@ err_dma:
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+
+ /* unmap dma */
+- mtk_tx_unmap(eth, tx_buf);
++ mtk_tx_unmap(eth, tx_buf, false);
+
+ itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+@@ -1386,7 +1391,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ done[mac]++;
+ budget--;
+ }
+- mtk_tx_unmap(eth, tx_buf);
++ mtk_tx_unmap(eth, tx_buf, true);
+
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+@@ -1423,7 +1428,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ budget--;
+ }
+
+- mtk_tx_unmap(eth, tx_buf);
++ mtk_tx_unmap(eth, tx_buf, true);
+
+ desc = &ring->dma[cpu];
+ ring->last_free = desc;
+@@ -1625,7 +1630,7 @@ static void mtk_tx_clean(struct mtk_eth
+
+ if (ring->buf) {
+ for (i = 0; i < MTK_DMA_SIZE; i++)
+- mtk_tx_unmap(eth, &ring->buf[i]);
++ mtk_tx_unmap(eth, &ring->buf[i], false);
+ kfree(ring->buf);
+ ring->buf = NULL;
+ }
diff --git a/target/linux/generic/pending-5.10/770-01-net-ethernet-mtk_eth_soc-significantly-reduce-mdio-b.patch b/target/linux/generic/pending-5.10/770-01-net-ethernet-mtk_eth_soc-significantly-reduce-mdio-b.patch
new file mode 100644
index 0000000000..922c35a3e4
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-01-net-ethernet-mtk_eth_soc-significantly-reduce-mdio-b.patch
@@ -0,0 +1,26 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Mon, 8 Jun 2020 17:02:39 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: significantly reduce mdio bus
+ access latency
+
+usleep_range often ends up sleeping much longer than the 10-20us provided
+as a range here. This causes significant latency in mdio bus accesses,
+which easily adds multiple seconds to the boot time on MT7621 when polling
+DSA slave ports.
+Use cond_resched instead of usleep_range, since the MDIO access does not
+take much time.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -85,7 +85,7 @@ static int mtk_mdio_busy_wait(struct mtk
+ return 0;
+ if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
+ break;
+- usleep_range(10, 20);
++ cond_resched();
+ }
+
+ dev_err(eth->dev, "mdio: MDIO timeout\n");
diff --git a/target/linux/generic/pending-5.10/770-02-net-ethernet-mtk_eth_soc-fix-rx-vlan-offload.patch b/target/linux/generic/pending-5.10/770-02-net-ethernet-mtk_eth_soc-fix-rx-vlan-offload.patch
new file mode 100644
index 0000000000..cebfe32d95
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-02-net-ethernet-mtk_eth_soc-fix-rx-vlan-offload.patch
@@ -0,0 +1,31 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 26 Aug 2020 16:52:12 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: fix rx vlan offload
+
+The VLAN ID in the rx descriptor is only valid if the RX_DMA_VTAG bit is set.
+This fixes frames wrongly marked with VLAN tags.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1324,7 +1324,7 @@ static int mtk_poll_rx(struct napi_struc
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+- RX_DMA_VID(trxd.rxd3))
++ (trxd.rxd2 & RX_DMA_VTAG))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ RX_DMA_VID(trxd.rxd3));
+ skb_record_rx_queue(skb, 0);
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -295,6 +295,7 @@
+ #define RX_DMA_LSO BIT(30)
+ #define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
+ #define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
++#define RX_DMA_VTAG BIT(15)
+
+ /* QDMA descriptor rxd3 */
+ #define RX_DMA_VID(_x) ((_x) & 0xfff)
diff --git a/target/linux/generic/pending-5.10/770-03-net-ethernet-mtk_eth_soc-fix-unnecessary-tx-queue-st.patch b/target/linux/generic/pending-5.10/770-03-net-ethernet-mtk_eth_soc-fix-unnecessary-tx-queue-st.patch
new file mode 100644
index 0000000000..82bdb10eba
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-03-net-ethernet-mtk_eth_soc-fix-unnecessary-tx-queue-st.patch
@@ -0,0 +1,50 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 26 Aug 2020 16:55:54 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: fix unnecessary tx queue
+ stops
+
+When running short on descriptors, only stop the queue for the netdev that tx
+was attempted for. By the time something tries to send on the other netdev,
+the ring might have some more room already.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1130,17 +1130,6 @@ static void mtk_wake_queue(struct mtk_et
+ }
+ }
+
+-static void mtk_stop_queue(struct mtk_eth *eth)
+-{
+- int i;
+-
+- for (i = 0; i < MTK_MAC_COUNT; i++) {
+- if (!eth->netdev[i])
+- continue;
+- netif_stop_queue(eth->netdev[i]);
+- }
+-}
+-
+ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct mtk_mac *mac = netdev_priv(dev);
+@@ -1161,7 +1150,7 @@ static netdev_tx_t mtk_start_xmit(struct
+
+ tx_num = mtk_cal_txd_req(skb);
+ if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
+- mtk_stop_queue(eth);
++ netif_stop_queue(dev);
+ netif_err(eth, tx_queued, dev,
+ "Tx Ring full when queue awake!\n");
+ spin_unlock(&eth->page_lock);
+@@ -1187,7 +1176,7 @@ static netdev_tx_t mtk_start_xmit(struct
+ goto drop;
+
+ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
+- mtk_stop_queue(eth);
++ netif_stop_queue(dev);
+
+ spin_unlock(&eth->page_lock);
+
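The resulting flow control, condensed into one sketch (built from the hunks
above, not a verbatim quote): each netdev now stops only its own queue when
the shared ring runs low, and the completion path wakes all stopped queues
once descriptors are reclaimed.

    /* Sketch: per-netdev stop on the shared tx ring. */
    if (unlikely(atomic_read(&ring->free_count) <= tx_num))
        netif_stop_queue(dev);          /* only this netdev */

    /* ...and on tx completion (mtk_wake_queue is unchanged): */
    if (mtk_queue_stopped(eth) &&
        atomic_read(&ring->free_count) > ring->thresh)
        mtk_wake_queue(eth);            /* wake every stopped netdev */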
diff --git a/target/linux/generic/pending-5.10/770-04-net-ethernet-mtk_eth_soc-use-larger-burst-size-for-q.patch b/target/linux/generic/pending-5.10/770-04-net-ethernet-mtk_eth_soc-use-larger-burst-size-for-q.patch
new file mode 100644
index 0000000000..94c0510257
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-04-net-ethernet-mtk_eth_soc-use-larger-burst-size-for-q.patch
@@ -0,0 +1,32 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 26 Aug 2020 16:58:55 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: use larger burst size for
+ qdma tx
+
+Improves tx performance
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -2191,7 +2191,7 @@ static int mtk_start_dma(struct mtk_eth
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mtk_w32(eth,
+ MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
+- MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
++ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
+ MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+ MTK_RX_BT_32DWORDS,
+ MTK_QDMA_GLO_CFG);
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -197,7 +197,7 @@
+ #define MTK_RX_BT_32DWORDS (3 << 11)
+ #define MTK_NDP_CO_PRO BIT(10)
+ #define MTK_TX_WB_DDONE BIT(6)
+-#define MTK_DMA_SIZE_16DWORDS (2 << 4)
++#define MTK_TX_BT_32DWORDS (3 << 4)
+ #define MTK_RX_DMA_BUSY BIT(3)
+ #define MTK_TX_DMA_BUSY BIT(1)
+ #define MTK_RX_DMA_EN BIT(2)
diff --git a/target/linux/generic/pending-5.10/770-05-net-ethernet-mtk_eth_soc-increase-DMA-ring-sizes.patch b/target/linux/generic/pending-5.10/770-05-net-ethernet-mtk_eth_soc-increase-DMA-ring-sizes.patch
new file mode 100644
index 0000000000..f68cbc333c
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-05-net-ethernet-mtk_eth_soc-increase-DMA-ring-sizes.patch
@@ -0,0 +1,21 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 26 Aug 2020 16:59:41 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: increase DMA ring sizes
+
+256 descriptors are not enough for multi-gigabit traffic under load on MT7622.
+Bump the ring size to 512 to improve performance.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -19,7 +19,7 @@
+ #define MTK_QDMA_PAGE_SIZE 2048
+ #define MTK_MAX_RX_LENGTH 1536
+ #define MTK_TX_DMA_BUF_LEN 0x3fff
+-#define MTK_DMA_SIZE 256
++#define MTK_DMA_SIZE 512
+ #define MTK_NAPI_WEIGHT 64
+ #define MTK_MAC_COUNT 2
+ #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
diff --git a/target/linux/generic/pending-5.10/770-06-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch b/target/linux/generic/pending-5.10/770-06-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch
new file mode 100644
index 0000000000..61a51ba33b
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-06-net-ethernet-mtk_eth_soc-implement-dynamic-interrupt.patch
@@ -0,0 +1,281 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 26 Aug 2020 17:02:30 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: implement dynamic interrupt
+ moderation
+
+Reduces the number of interrupts under load
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/Kconfig
++++ b/drivers/net/ethernet/mediatek/Kconfig
+@@ -10,6 +10,7 @@ if NET_VENDOR_MEDIATEK
+ config NET_MEDIATEK_SOC
+ tristate "MediaTek SoC Gigabit Ethernet support"
+ select PHYLINK
++ select DIMLIB
+ help
+ This driver supports the gigabit ethernet MACs in the
+ MediaTek SoC family.
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1232,12 +1232,13 @@ static void mtk_update_rx_cpu_idx(struct
+ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ struct mtk_eth *eth)
+ {
++ struct dim_sample dim_sample = {};
+ struct mtk_rx_ring *ring;
+ int idx;
+ struct sk_buff *skb;
+ u8 *data, *new_data;
+ struct mtk_rx_dma *rxd, trxd;
+- int done = 0;
++ int done = 0, bytes = 0;
+
+ while (done < budget) {
+ struct net_device *netdev;
+@@ -1311,6 +1312,7 @@ static int mtk_poll_rx(struct napi_struc
+ else
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, netdev);
++ bytes += pktlen;
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+ (trxd.rxd2 & RX_DMA_VTAG))
+@@ -1342,6 +1344,12 @@ rx_done:
+ mtk_update_rx_cpu_idx(eth);
+ }
+
++ eth->rx_packets += done;
++ eth->rx_bytes += bytes;
++ dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
++ &dim_sample);
++ net_dim(&eth->rx_dim, dim_sample);
++
+ return done;
+ }
+
+@@ -1434,6 +1442,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+ {
+ struct mtk_tx_ring *ring = &eth->tx_ring;
++ struct dim_sample dim_sample = {};
+ unsigned int done[MTK_MAX_DEVS];
+ unsigned int bytes[MTK_MAX_DEVS];
+ int total = 0, i;
+@@ -1451,8 +1460,14 @@ static int mtk_poll_tx(struct mtk_eth *e
+ continue;
+ netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
+ total += done[i];
++ eth->tx_packets += done[i];
++ eth->tx_bytes += bytes[i];
+ }
+
++ dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
++ &dim_sample);
++ net_dim(&eth->tx_dim, dim_sample);
++
+ if (mtk_queue_stopped(eth) &&
+ (atomic_read(&ring->free_count) > ring->thresh))
+ mtk_wake_queue(eth);
+@@ -2127,6 +2142,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+ {
+ struct mtk_eth *eth = _eth;
+
++ eth->rx_events++;
+ if (likely(napi_schedule_prep(&eth->rx_napi))) {
+ __napi_schedule(&eth->rx_napi);
+ mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+@@ -2139,6 +2155,7 @@ static irqreturn_t mtk_handle_irq_tx(int
+ {
+ struct mtk_eth *eth = _eth;
+
++ eth->tx_events++;
+ if (likely(napi_schedule_prep(&eth->tx_napi))) {
+ __napi_schedule(&eth->tx_napi);
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+@@ -2315,6 +2332,9 @@ static int mtk_stop(struct net_device *d
+ napi_disable(&eth->tx_napi);
+ napi_disable(&eth->rx_napi);
+
++ cancel_work_sync(&eth->rx_dim.work);
++ cancel_work_sync(&eth->tx_dim.work);
++
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+ mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+@@ -2364,6 +2384,64 @@ err_disable_clks:
+ return ret;
+ }
+
++static void mtk_dim_rx(struct work_struct *work)
++{
++ struct dim *dim = container_of(work, struct dim, work);
++ struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
++ struct dim_cq_moder cur_profile;
++ u32 val, cur;
++
++ cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
++ dim->profile_ix);
++ spin_lock_bh(&eth->dim_lock);
++
++ val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
++ val &= MTK_PDMA_DELAY_TX_MASK;
++ val |= MTK_PDMA_DELAY_RX_EN;
++
++ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
++ val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
++
++ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
++ val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
++
++ mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
++ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
++
++ spin_unlock_bh(&eth->dim_lock);
++
++ dim->state = DIM_START_MEASURE;
++}
++
++static void mtk_dim_tx(struct work_struct *work)
++{
++ struct dim *dim = container_of(work, struct dim, work);
++ struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
++ struct dim_cq_moder cur_profile;
++ u32 val, cur;
++
++ cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
++ dim->profile_ix);
++ spin_lock_bh(&eth->dim_lock);
++
++ val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
++ val &= MTK_PDMA_DELAY_RX_MASK;
++ val |= MTK_PDMA_DELAY_TX_EN;
++
++ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
++ val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
++
++ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
++ val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
++
++ mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
++ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
++
++ spin_unlock_bh(&eth->dim_lock);
++
++ dim->state = DIM_START_MEASURE;
++}
++
+ static int mtk_hw_init(struct mtk_eth *eth)
+ {
+ int i, val, ret;
+@@ -2385,9 +2463,6 @@ static int mtk_hw_init(struct mtk_eth *e
+ goto err_disable_pm;
+ }
+
+- /* enable interrupt delay for RX */
+- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+-
+ /* disable delay and normal interrupt */
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
+@@ -2426,11 +2501,10 @@ static int mtk_hw_init(struct mtk_eth *e
+ /* Enable RX VLan Offloading */
+ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+
+- /* enable interrupt delay for RX */
+- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
++ mtk_dim_rx(&eth->rx_dim.work);
++ mtk_dim_tx(&eth->tx_dim.work);
+
+ /* disable delay and normal interrupt */
+- mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
+
+@@ -2934,6 +3008,13 @@ static int mtk_probe(struct platform_dev
+ spin_lock_init(&eth->page_lock);
+ spin_lock_init(&eth->tx_irq_lock);
+ spin_lock_init(&eth->rx_irq_lock);
++ spin_lock_init(&eth->dim_lock);
++
++ eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
++ INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
++
++ eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
++ INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -15,6 +15,7 @@
+ #include <linux/u64_stats_sync.h>
+ #include <linux/refcount.h>
+ #include <linux/phylink.h>
++#include <linux/dim.h>
+
+ #define MTK_QDMA_PAGE_SIZE 2048
+ #define MTK_MAX_RX_LENGTH 1536
+@@ -131,13 +132,18 @@
+
+ /* PDMA Delay Interrupt Register */
+ #define MTK_PDMA_DELAY_INT 0xa0c
++#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
+ #define MTK_PDMA_DELAY_RX_EN BIT(15)
+-#define MTK_PDMA_DELAY_RX_PINT 4
+ #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
+-#define MTK_PDMA_DELAY_RX_PTIME 4
+-#define MTK_PDMA_DELAY_RX_DELAY \
+- (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
+- (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
++#define MTK_PDMA_DELAY_RX_PTIME_SHIFT 0
++
++#define MTK_PDMA_DELAY_TX_MASK GENMASK(31, 16)
++#define MTK_PDMA_DELAY_TX_EN BIT(31)
++#define MTK_PDMA_DELAY_TX_PINT_SHIFT 24
++#define MTK_PDMA_DELAY_TX_PTIME_SHIFT 16
++
++#define MTK_PDMA_DELAY_PINT_MASK 0x7f
++#define MTK_PDMA_DELAY_PTIME_MASK 0xff
+
+ /* PDMA Interrupt Status Register */
+ #define MTK_PDMA_INT_STATUS 0xa20
+@@ -219,6 +225,7 @@
+ /* QDMA Interrupt Status Register */
+ #define MTK_QDMA_INT_STATUS 0x1A18
+ #define MTK_RX_DONE_DLY BIT(30)
++#define MTK_TX_DONE_DLY BIT(28)
+ #define MTK_RX_DONE_INT3 BIT(19)
+ #define MTK_RX_DONE_INT2 BIT(18)
+ #define MTK_RX_DONE_INT1 BIT(17)
+@@ -228,8 +235,7 @@
+ #define MTK_TX_DONE_INT1 BIT(1)
+ #define MTK_TX_DONE_INT0 BIT(0)
+ #define MTK_RX_DONE_INT MTK_RX_DONE_DLY
+-#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
+- MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
++#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
+
+ /* QDMA Interrupt grouping registers */
+ #define MTK_QDMA_INT_GRP1 0x1a20
+@@ -892,6 +898,18 @@ struct mtk_eth {
+
+ const struct mtk_soc_data *soc;
+
++ spinlock_t dim_lock;
++
++ u32 rx_events;
++ u32 rx_packets;
++ u32 rx_bytes;
++ struct dim rx_dim;
++
++ u32 tx_events;
++ u32 tx_packets;
++ u32 tx_bytes;
++ struct dim tx_dim;
++
+ u32 tx_int_mask_reg;
+ u32 tx_int_status_reg;
+ u32 rx_dma_l4_valid;
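For orientation, the DIMLIB control loop this patch wires up, compressed into
one sketch (names as in the patch; register programming elided):

    /* Sketch of the dynamic interrupt moderation (DIM) cycle.
     * IRQ/NAPI side: count events, then feed a sample to DIMLIB.
     */
    struct dim_sample sample = {};

    dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
                      &sample);
    net_dim(&eth->rx_dim, sample);      /* may schedule rx_dim.work */

    /* Worker side (mtk_dim_rx/mtk_dim_tx): translate the selected
     * profile into delay-interrupt register values, then restart:
     *   moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
     *   program moder.usec / moder.pkts into MTK_PDMA_DELAY_INT;
     *   dim->state = DIM_START_MEASURE;
     */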
diff --git a/target/linux/generic/pending-5.10/770-08-net-ethernet-mtk_eth_soc-cache-hardware-pointer-of-l.patch b/target/linux/generic/pending-5.10/770-08-net-ethernet-mtk_eth_soc-cache-hardware-pointer-of-l.patch
new file mode 100644
index 0000000000..20181b8d12
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-08-net-ethernet-mtk_eth_soc-cache-hardware-pointer-of-l.patch
@@ -0,0 +1,67 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 27 Aug 2020 06:32:03 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: cache hardware pointer of last
+ freed tx descriptor
+
+The value is only updated by the CPU, so it is cheaper to access from the ring
+data structure than from a hardware register
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1362,7 +1362,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ struct mtk_tx_buf *tx_buf;
+ u32 cpu, dma;
+
+- cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
++ cpu = ring->last_free_ptr;
+ dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
+
+ desc = mtk_qdma_phys_to_virt(ring, cpu);
+@@ -1396,6 +1396,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ cpu = next_cpu;
+ }
+
++ ring->last_free_ptr = cpu;
+ mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+
+ return budget;
+@@ -1596,6 +1597,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
+ ring->next_free = &ring->dma[0];
+ ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
++ ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
+ ring->thresh = MAX_SKB_FRAGS;
+
+ /* make sure that all changes to the dma ring are flushed before we
+@@ -1609,9 +1611,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ mtk_w32(eth,
+ ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ MTK_QTX_CRX_PTR);
+- mtk_w32(eth,
+- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+- MTK_QTX_DRX_PTR);
++ mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
+ mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
+ MTK_QTX_CFG(0));
+ } else {
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -624,6 +624,7 @@ struct mtk_tx_buf {
+ * @phys: The physical addr of tx_buf
+ * @next_free: Pointer to the next free descriptor
+ * @last_free: Pointer to the last free descriptor
++ * @last_free_ptr: Hardware pointer value of the last free descriptor
+ * @thresh: The threshold of minimum amount of free descriptors
+ * @free_count: QDMA uses a linked list. Track how many free descriptors
+ * are present
+@@ -634,6 +635,7 @@ struct mtk_tx_ring {
+ dma_addr_t phys;
+ struct mtk_tx_dma *next_free;
+ struct mtk_tx_dma *last_free;
++ u32 last_free_ptr;
+ u16 thresh;
+ atomic_t free_count;
+ int dma_size;
diff --git a/target/linux/generic/pending-5.10/770-09-net-ethernet-mtk_eth_soc-only-read-the-full-rx-descr.patch b/target/linux/generic/pending-5.10/770-09-net-ethernet-mtk_eth_soc-only-read-the-full-rx-descr.patch
new file mode 100644
index 0000000000..bbd8ae7be4
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-09-net-ethernet-mtk_eth_soc-only-read-the-full-rx-descr.patch
@@ -0,0 +1,44 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 27 Aug 2020 09:24:25 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: only read the full rx
+ descriptor if DMA is done
+
+Uncached memory access is expensive, and there is no need to access all
+descriptor words if we can't process them anyway
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -776,13 +776,18 @@ static inline int mtk_max_buf_size(int f
+ return buf_size;
+ }
+
+-static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
++static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
+ struct mtk_rx_dma *dma_rxd)
+ {
+- rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
++ if (!(rxd->rxd2 & RX_DMA_DONE))
++ return false;
++
++ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+ rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
++
++ return true;
+ }
+
+ /* the qdma core needs scratch memory to be setup */
+@@ -1254,8 +1259,7 @@ static int mtk_poll_rx(struct napi_struc
+ rxd = &ring->dma[idx];
+ data = ring->data[idx];
+
+- mtk_rx_get_desc(&trxd, rxd);
+- if (!(trxd.rxd2 & RX_DMA_DONE))
++ if (!mtk_rx_get_desc(&trxd, rxd))
+ break;
+
+ /* find out which mac the packet come from. values start at 1 */
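The access pattern, restated as a stand-alone sketch: read the ownership word
first and skip the remaining three uncached reads while the descriptor is
still owned by DMA.

    /* Sketch: probe the DONE bit before fetching the whole descriptor. */
    static bool rx_desc_ready(struct mtk_rx_dma *dst,
                              struct mtk_rx_dma *src)
    {
        dst->rxd2 = READ_ONCE(src->rxd2);
        if (!(dst->rxd2 & RX_DMA_DONE))
            return false;               /* saves three uncached reads */

        dst->rxd1 = READ_ONCE(src->rxd1);
        dst->rxd3 = READ_ONCE(src->rxd3);
        dst->rxd4 = READ_ONCE(src->rxd4);
        return true;
    }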
diff --git a/target/linux/generic/pending-5.10/770-10-net-ethernet-mtk_eth_soc-unmap-rx-data-before-callin.patch b/target/linux/generic/pending-5.10/770-10-net-ethernet-mtk_eth_soc-unmap-rx-data-before-callin.patch
new file mode 100644
index 0000000000..4a19fa1164
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-10-net-ethernet-mtk_eth_soc-unmap-rx-data-before-callin.patch
@@ -0,0 +1,44 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 27 Aug 2020 09:44:43 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: unmap rx data before calling
+ build_skb
+
+Since build_skb accesses the data area (to initialize shinfo), the DMA unmap
+needs to happen before that call.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1297,17 +1297,18 @@ static int mtk_poll_rx(struct napi_struc
+ goto release_desc;
+ }
+
++ dma_unmap_single(eth->dev, trxd.rxd1,
++ ring->buf_size, DMA_FROM_DEVICE);
++
+ /* receive data */
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+- skb_free_frag(new_data);
++ skb_free_frag(data);
+ netdev->stats.rx_dropped++;
+- goto release_desc;
++ goto skip_rx;
+ }
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+- dma_unmap_single(eth->dev, trxd.rxd1,
+- ring->buf_size, DMA_FROM_DEVICE);
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+ skb->dev = netdev;
+ skb_put(skb, pktlen);
+@@ -1325,6 +1326,7 @@ static int mtk_poll_rx(struct napi_struc
+ skb_record_rx_queue(skb, 0);
+ napi_gro_receive(napi, skb);
+
++skip_rx:
+ ring->data[idx] = new_data;
+ rxd->rxd1 = (unsigned int)dma_addr;
+
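The ordering constraint in isolation (a sketch of the fixed sequence):
dma_unmap_single() hands the buffer back to the CPU, and only then may
build_skb() write struct skb_shared_info into the buffer tail.

    /* Sketch: unmap first, then let build_skb() touch the memory. */
    dma_unmap_single(eth->dev, trxd.rxd1, ring->buf_size,
                     DMA_FROM_DEVICE);      /* CPU owns the buffer now */
    skb = build_skb(data, ring->frag_size); /* writes shinfo at the tail */
    if (unlikely(!skb))
        skb_free_frag(data);    /* free the old frag, keep new_data */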
diff --git a/target/linux/generic/pending-5.10/770-11-net-ethernet-mtk_eth_soc-avoid-rearming-interrupt-if.patch b/target/linux/generic/pending-5.10/770-11-net-ethernet-mtk_eth_soc-avoid-rearming-interrupt-if.patch
new file mode 100644
index 0000000000..4c61951d77
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-11-net-ethernet-mtk_eth_soc-avoid-rearming-interrupt-if.patch
@@ -0,0 +1,35 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 4 Sep 2020 18:14:05 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: avoid rearming interrupt if
+ napi_complete returns false
+
+Reduces unnecessary interrupts
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1519,8 +1519,8 @@ static int mtk_napi_tx(struct napi_struc
+ if (status & MTK_TX_DONE_INT)
+ return budget;
+
+- napi_complete(napi);
+- mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
++ if (napi_complete(napi))
++ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+
+ return tx_done;
+ }
+@@ -1553,8 +1553,9 @@ poll_again:
+ remain_budget -= rx_done;
+ goto poll_again;
+ }
+- napi_complete(napi);
+- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
++
++ if (napi_complete(napi))
++ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+
+ return rx_done + budget - remain_budget;
+ }
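napi_complete() returns false when NAPI has been rescheduled in the meantime;
re-arming the interrupt in that case would fire a redundant IRQ for work the
next poll already covers. Hence the pattern:

    /* Sketch: re-enable the IRQ only if polling really finished. */
    if (napi_complete(napi))
        mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);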
diff --git a/target/linux/generic/pending-5.10/770-13-net-ethernet-mtk_eth_soc-fix-parsing-packets-in-GDM.patch b/target/linux/generic/pending-5.10/770-13-net-ethernet-mtk_eth_soc-fix-parsing-packets-in-GDM.patch
new file mode 100644
index 0000000000..d81d4bf5f4
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-13-net-ethernet-mtk_eth_soc-fix-parsing-packets-in-GDM.patch
@@ -0,0 +1,67 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sun, 13 Sep 2020 08:17:02 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: fix parsing packets in GDM
+
+When using DSA, set the special tag bit in GDM ingress control so that the MAC
+can properly parse DSA-tagged packets. This affects rx DMA source port reporting.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -19,6 +19,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/pinctrl/devinfo.h>
+ #include <linux/phylink.h>
++#include <net/dsa.h>
+
+ #include "mtk_eth_soc.h"
+
+@@ -1263,13 +1264,12 @@ static int mtk_poll_rx(struct napi_struc
+ break;
+
+ /* find out which mac the packet come from. values start at 1 */
+- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
++ (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
+ mac = 0;
+- } else {
+- mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
+- RX_DMA_FPORT_MASK;
+- mac--;
+- }
++ else
++ mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
++ RX_DMA_FPORT_MASK) - 1;
+
+ if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+ !eth->netdev[mac]))
+@@ -2251,6 +2251,9 @@ static void mtk_gdm_config(struct mtk_et
+
+ val |= config;
+
++ if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
++ val |= MTK_GDMA_SPECIAL_TAG;
++
+ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
+ }
+ /* Reset and enable PSE */
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -82,6 +82,7 @@
+
+ /* GDM Exgress Control Register */
+ #define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
++#define MTK_GDMA_SPECIAL_TAG BIT(24)
+ #define MTK_GDMA_ICS_EN BIT(22)
+ #define MTK_GDMA_TCS_EN BIT(21)
+ #define MTK_GDMA_UCS_EN BIT(20)
+@@ -311,6 +312,7 @@
+ #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
+ #define RX_DMA_FPORT_SHIFT 19
+ #define RX_DMA_FPORT_MASK 0x7
++#define RX_DMA_SPECIAL_TAG BIT(22)
+
+ /* PHY Indirect Access Control registers */
+ #define MTK_PHY_IAC 0x10004
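Condensed, the two halves of the change (a recap sketch; netdev_uses_dsa()
is true once a DSA switch tree is attached to the port):

    /* Sketch, ingress config: let GDMA parse the DSA special tag. */
    if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
        val |= MTK_GDMA_SPECIAL_TAG;    /* BIT(24) in MTK_GDMA_FWD_CFG */

    /* Sketch, rx path: special-tagged frames report no GDM source
     * port, so they are mapped to mac 0. */
    if (trxd.rxd4 & RX_DMA_SPECIAL_TAG)
        mac = 0;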
diff --git a/target/linux/generic/pending-5.10/770-14-net-ethernet-mtk_eth_soc-set-PPE-flow-hash-as-skb-ha.patch b/target/linux/generic/pending-5.10/770-14-net-ethernet-mtk_eth_soc-set-PPE-flow-hash-as-skb-ha.patch
new file mode 100644
index 0000000000..e94b352b9c
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-14-net-ethernet-mtk_eth_soc-set-PPE-flow-hash-as-skb-ha.patch
@@ -0,0 +1,41 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sun, 13 Sep 2020 08:27:24 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: set PPE flow hash as skb hash
+ if present
+
+This improves GRO performance
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -19,6 +19,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/pinctrl/devinfo.h>
+ #include <linux/phylink.h>
++#include <linux/jhash.h>
+ #include <net/dsa.h>
+
+ #include "mtk_eth_soc.h"
+@@ -1250,6 +1251,7 @@ static int mtk_poll_rx(struct napi_struc
+ struct net_device *netdev;
+ unsigned int pktlen;
+ dma_addr_t dma_addr;
++ u32 hash;
+ int mac;
+
+ ring = mtk_get_rx_ring(eth);
+@@ -1319,6 +1321,12 @@ static int mtk_poll_rx(struct napi_struc
+ skb->protocol = eth_type_trans(skb, netdev);
+ bytes += pktlen;
+
++ hash = trxd.rxd4 & GENMASK(13, 0);
++ if (hash != GENMASK(13, 0)) {
++ hash = jhash_1word(hash, 0);
++ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
++ }
++
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+ (trxd.rxd2 & RX_DMA_VTAG))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
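The hash field occupies rxd4 bits 13:0; the all-ones value means "no PPE flow
entry" and must not be fed to the stack. A sketch of the mapping into a 32-bit
skb hash, as the hunk above does:

    /* Sketch: expand the 14-bit PPE hash into a 32-bit L4 skb hash. */
    u32 hash = trxd.rxd4 & GENMASK(13, 0);

    if (hash != GENMASK(13, 0)) {       /* all-ones: no FOE entry */
        hash = jhash_1word(hash, 0);    /* spread over 32 bits for GRO */
        skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
    }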
diff --git a/target/linux/generic/pending-5.10/770-15-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch b/target/linux/generic/pending-5.10/770-15-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch
new file mode 100644
index 0000000000..fa4803211a
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-15-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch
@@ -0,0 +1,1307 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sun, 11 Oct 2020 22:23:08 +0200
+Subject: [PATCH] ethernet: mediatek: mtk_eth_soc: add support for
+ initializing the PPE
+
+The PPE (packet processing engine) is used to offload NAT/routed or even
+bridged flows. This patch brings up the PPE and uses it to get a packet
+hash. It also contains some functionality that will be used to bring up
+flow offloading.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
+ create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.h
+ create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+ create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+
+--- a/drivers/net/ethernet/mediatek/Makefile
++++ b/drivers/net/ethernet/mediatek/Makefile
+@@ -4,5 +4,5 @@
+ #
+
+ obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
+-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o
++mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o
+ obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -2284,12 +2284,17 @@ static int mtk_open(struct net_device *d
+
+ /* we run 2 netdevs on the same dma ring so we only bring it up once */
+ if (!refcount_read(&eth->dma_refcnt)) {
+- int err = mtk_start_dma(eth);
++ u32 gdm_config = MTK_GDMA_TO_PDMA;
++ int err;
+
++ err = mtk_start_dma(eth);
+ if (err)
+ return err;
+
+- mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
++ if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
++ gdm_config = MTK_GDMA_TO_PPE;
++
++ mtk_gdm_config(eth, gdm_config);
+
+ napi_enable(&eth->tx_napi);
+ napi_enable(&eth->rx_napi);
+@@ -2359,6 +2364,9 @@ static int mtk_stop(struct net_device *d
+
+ mtk_dma_free(eth);
+
++ if (eth->soc->offload_version)
++ mtk_ppe_stop(&eth->ppe);
++
+ return 0;
+ }
+
+@@ -3148,6 +3156,13 @@ static int mtk_probe(struct platform_dev
+ goto err_free_dev;
+ }
+
++ if (eth->soc->offload_version) {
++ err = mtk_ppe_init(&eth->ppe, eth->dev,
++ eth->base + MTK_ETH_PPE_BASE, 2);
++ if (err)
++ goto err_free_dev;
++ }
++
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+@@ -3222,6 +3237,7 @@ static const struct mtk_soc_data mt7621_
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7621_CLKS_BITMAP,
+ .required_pctl = false,
++ .offload_version = 2,
+ };
+
+ static const struct mtk_soc_data mt7622_data = {
+@@ -3230,6 +3246,7 @@ static const struct mtk_soc_data mt7622_
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7622_CLKS_BITMAP,
+ .required_pctl = false,
++ .offload_version = 2,
+ };
+
+ static const struct mtk_soc_data mt7623_data = {
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -16,6 +16,7 @@
+ #include <linux/refcount.h>
+ #include <linux/phylink.h>
+ #include <linux/dim.h>
++#include "mtk_ppe.h"
+
+ #define MTK_QDMA_PAGE_SIZE 2048
+ #define MTK_MAX_RX_LENGTH 1536
+@@ -87,6 +88,7 @@
+ #define MTK_GDMA_TCS_EN BIT(21)
+ #define MTK_GDMA_UCS_EN BIT(20)
+ #define MTK_GDMA_TO_PDMA 0x0
++#define MTK_GDMA_TO_PPE 0x4444
+ #define MTK_GDMA_DROP_ALL 0x7777
+
+ /* Unicast Filter MAC Address Register - Low */
+@@ -308,6 +310,12 @@
+ #define RX_DMA_VID(_x) ((_x) & 0xfff)
+
+ /* QDMA descriptor rxd4 */
++#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
++#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14)
++#define MTK_RXD4_SRC_PORT GENMASK(21, 19)
++#define MTK_RXD4_ALG GENMASK(31, 22)
++
++/* QDMA descriptor rxd4 */
+ #define RX_DMA_L4_VALID BIT(24)
+ #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
+ #define RX_DMA_FPORT_SHIFT 19
+@@ -807,6 +815,7 @@ struct mtk_soc_data {
+ u32 caps;
+ u32 required_clks;
+ bool required_pctl;
++ u8 offload_version;
+ netdev_features_t hw_features;
+ };
+
+@@ -918,6 +927,8 @@ struct mtk_eth {
+ u32 tx_int_status_reg;
+ u32 rx_dma_l4_valid;
+ int ip_align;
++
++ struct mtk_ppe ppe;
+ };
+
+ /* struct mtk_mac - the structure that holds the info about the MACs of the
+--- /dev/null
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -0,0 +1,511 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
++
++#include <linux/kernel.h>
++#include <linux/jiffies.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/etherdevice.h>
++#include <linux/platform_device.h>
++#include "mtk_ppe.h"
++#include "mtk_ppe_regs.h"
++
++static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
++{
++ writel(val, ppe->base + reg);
++}
++
++static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
++{
++ return readl(ppe->base + reg);
++}
++
++static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
++{
++ u32 val;
++
++ val = ppe_r32(ppe, reg);
++ val &= ~mask;
++ val |= set;
++ ppe_w32(ppe, reg, val);
++
++ return val;
++}
++
++static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
++{
++ return ppe_m32(ppe, reg, 0, val);
++}
++
++static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
++{
++ return ppe_m32(ppe, reg, val, 0);
++}
++
++static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
++{
++ unsigned long timeout = jiffies + HZ;
++
++	while (time_is_after_jiffies(timeout)) {
++ if (!(ppe_r32(ppe, MTK_PPE_GLO_CFG) & MTK_PPE_GLO_CFG_BUSY))
++ return 0;
++
++ usleep_range(10, 20);
++ }
++
++ dev_err(ppe->dev, "PPE table busy");
++
++ return -ETIMEDOUT;
++}
++
++static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
++{
++ ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
++ ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
++}
++
++static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
++{
++ mtk_ppe_cache_clear(ppe);
++
++ ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
++ enable * MTK_PPE_CACHE_CTL_EN);
++}
++
++static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
++{
++ u32 hv1, hv2, hv3;
++ u32 hash;
++
++ switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
++ case MTK_PPE_PKT_TYPE_BRIDGE:
++ hv1 = e->bridge.src_mac_lo;
++ hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
++ hv2 = e->bridge.src_mac_hi >> 16;
++ hv2 ^= e->bridge.dest_mac_lo;
++ hv3 = e->bridge.dest_mac_hi;
++ break;
++ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
++ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
++ hv1 = e->ipv4.orig.ports;
++ hv2 = e->ipv4.orig.dest_ip;
++ hv3 = e->ipv4.orig.src_ip;
++ break;
++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
++ hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
++ hv1 ^= e->ipv6.ports;
++
++ hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
++ hv2 ^= e->ipv6.dest_ip[0];
++
++ hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
++ hv3 ^= e->ipv6.src_ip[0];
++ break;
++ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
++ case MTK_PPE_PKT_TYPE_IPV6_6RD:
++ default:
++ WARN_ON_ONCE(1);
++ return MTK_PPE_HASH_MASK;
++ }
++
++ hash = (hv1 & hv2) | ((~hv1) & hv3);
++ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
++ hash ^= hv1 ^ hv2 ^ hv3;
++ hash ^= hash >> 16;
++ hash <<= 1;
++ hash &= MTK_PPE_ENTRIES - 1;
++
++ return hash;
++}
++
++static inline struct mtk_foe_mac_info *
++mtk_foe_entry_l2(struct mtk_foe_entry *entry)
++{
++ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
++
++ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
++ return &entry->ipv6.l2;
++
++ return &entry->ipv4.l2;
++}
++
++static inline u32 *
++mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
++{
++ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
++
++ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
++ return &entry->ipv6.ib2;
++
++ return &entry->ipv4.ib2;
++}
++
++int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
++ u8 pse_port, u8 *src_mac, u8 *dest_mac)
++{
++ struct mtk_foe_mac_info *l2;
++ u32 ports_pad, val;
++
++ memset(entry, 0, sizeof(*entry));
++
++ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
++ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
++ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
++ MTK_FOE_IB1_BIND_TTL |
++ MTK_FOE_IB1_BIND_CACHE;
++ entry->ib1 = val;
++
++ val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
++ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
++ FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
++
++ if (is_multicast_ether_addr(dest_mac))
++ val |= MTK_FOE_IB2_MULTICAST;
++
++ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
++ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
++ entry->ipv4.orig.ports = ports_pad;
++ if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
++ entry->ipv6.ports = ports_pad;
++
++ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
++ entry->ipv6.ib2 = val;
++ l2 = &entry->ipv6.l2;
++ } else {
++ entry->ipv4.ib2 = val;
++ l2 = &entry->ipv4.l2;
++ }
++
++ l2->dest_mac_hi = get_unaligned_be32(dest_mac);
++ l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
++ l2->src_mac_hi = get_unaligned_be32(src_mac);
++ l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
++
++ if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
++ l2->etype = ETH_P_IPV6;
++ else
++ l2->etype = ETH_P_IP;
++
++ return 0;
++}
++
++int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
++{
++ u32 *ib2 = mtk_foe_entry_ib2(entry);
++ u32 val;
++
++ val = *ib2;
++ val &= ~MTK_FOE_IB2_DEST_PORT;
++ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
++ *ib2 = val;
++
++ return 0;
++}
++
++int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
++ __be32 src_addr, __be16 src_port,
++ __be32 dest_addr, __be16 dest_port)
++{
++ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
++ struct mtk_ipv4_tuple *t;
++
++ switch (type) {
++ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
++ if (egress) {
++ t = &entry->ipv4.new;
++ break;
++ }
++ fallthrough;
++ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
++ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
++ t = &entry->ipv4.orig;
++ break;
++ case MTK_PPE_PKT_TYPE_IPV6_6RD:
++ entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
++ entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
++ return 0;
++ default:
++ WARN_ON_ONCE(1);
++ return -EINVAL;
++ }
++
++ t->src_ip = be32_to_cpu(src_addr);
++ t->dest_ip = be32_to_cpu(dest_addr);
++
++ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
++ return 0;
++
++ t->src_port = be16_to_cpu(src_port);
++ t->dest_port = be16_to_cpu(dest_port);
++
++ return 0;
++}
++
++int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
++ __be32 *src_addr, __be16 src_port,
++ __be32 *dest_addr, __be16 dest_port)
++{
++ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
++ u32 *src, *dest;
++ int i;
++
++ switch (type) {
++ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
++ src = entry->dslite.tunnel_src_ip;
++ dest = entry->dslite.tunnel_dest_ip;
++ break;
++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
++ case MTK_PPE_PKT_TYPE_IPV6_6RD:
++ entry->ipv6.src_port = be16_to_cpu(src_port);
++ entry->ipv6.dest_port = be16_to_cpu(dest_port);
++ fallthrough;
++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
++ src = entry->ipv6.src_ip;
++ dest = entry->ipv6.dest_ip;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ return -EINVAL;
++	}
++
++ for (i = 0; i < 4; i++)
++ src[i] = be32_to_cpu(src_addr[i]);
++ for (i = 0; i < 4; i++)
++ dest[i] = be32_to_cpu(dest_addr[i]);
++
++ return 0;
++}
++
++int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
++{
++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
++
++ l2->etype = BIT(port);
++
++ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
++ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
++ else
++ l2->etype |= BIT(8);
++
++ entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
++
++ return 0;
++}
++
++int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
++{
++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
++
++ switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
++ case 0:
++ entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
++ FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
++ l2->vlan1 = vid;
++ return 0;
++ case 1:
++ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
++ l2->vlan1 = vid;
++ l2->etype |= BIT(8);
++ } else {
++ l2->vlan2 = vid;
++ entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
++ }
++ return 0;
++ default:
++ return -ENOSPC;
++ }
++}
++
++int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
++{
++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
++
++ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
++ (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
++ l2->etype = ETH_P_PPP_SES;
++
++ entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
++ l2->pppoe_id = sid;
++
++ return 0;
++}
++
++static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
++{
++ return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
++ FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
++}
++
++int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
++ u16 timestamp)
++{
++ struct mtk_foe_entry *hwe;
++ u32 hash;
++
++ timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
++ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
++ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
++
++ hash = mtk_ppe_hash_entry(entry);
++ hwe = &ppe->foe_table[hash];
++ if (!mtk_foe_entry_usable(hwe)) {
++ hwe++;
++ hash++;
++
++ if (!mtk_foe_entry_usable(hwe))
++ return -ENOSPC;
++ }
++
++ memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
++ wmb();
++ hwe->ib1 = entry->ib1;
++
++ dma_wmb();
++
++ mtk_ppe_cache_clear(ppe);
++
++ return hash;
++}
++
++int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
++ int version)
++{
++ struct mtk_foe_entry *foe;
++
++	/* need to allocate a separate device, since the PPE DMA access is
++	 * not coherent.
++ */
++ ppe->base = base;
++ ppe->dev = dev;
++ ppe->version = version;
++
++ foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
++ &ppe->foe_phys, GFP_KERNEL);
++ if (!foe)
++ return -ENOMEM;
++
++ ppe->foe_table = foe;
++
++ mtk_ppe_debugfs_init(ppe);
++
++ return 0;
++}
++
++static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
++{
++ static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
++ int i, k;
++
++	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
++
++ if (!IS_ENABLED(CONFIG_SOC_MT7621))
++ return;
++
++ /* skip all entries that cross the 1024 byte boundary */
++ for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
++ for (k = 0; k < ARRAY_SIZE(skip); k++)
++ ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
++}
++
++int mtk_ppe_start(struct mtk_ppe *ppe)
++{
++ u32 val;
++
++ mtk_ppe_init_foe_table(ppe);
++ ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
++
++ val = MTK_PPE_TB_CFG_ENTRY_80B |
++ MTK_PPE_TB_CFG_AGE_NON_L4 |
++ MTK_PPE_TB_CFG_AGE_UNBIND |
++ MTK_PPE_TB_CFG_AGE_TCP |
++ MTK_PPE_TB_CFG_AGE_UDP |
++ MTK_PPE_TB_CFG_AGE_TCP_FIN |
++ FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
++ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
++ FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
++ MTK_PPE_KEEPALIVE_DISABLE) |
++ FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
++ FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
++ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
++ FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
++ MTK_PPE_ENTRIES_SHIFT);
++ ppe_w32(ppe, MTK_PPE_TB_CFG, val);
++
++ ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
++ MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
++
++ mtk_ppe_cache_enable(ppe, true);
++
++ val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
++ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
++ MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
++ MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
++ MTK_PPE_FLOW_CFG_IP6_6RD |
++ MTK_PPE_FLOW_CFG_IP4_NAT |
++ MTK_PPE_FLOW_CFG_IP4_NAPT |
++ MTK_PPE_FLOW_CFG_IP4_DSLITE |
++ MTK_PPE_FLOW_CFG_L2_BRIDGE |
++ MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
++ ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
++
++ val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
++ FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
++ ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
++
++ val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
++ FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
++ ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
++
++ val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
++ FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
++ ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
++
++ val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
++ ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
++
++ val = MTK_PPE_BIND_LIMIT1_FULL |
++ FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
++ ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
++
++ val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
++ FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
++ ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
++
++ /* enable PPE */
++ val = MTK_PPE_GLO_CFG_EN |
++ MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
++ MTK_PPE_GLO_CFG_IP4_CS_DROP |
++ MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
++ ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
++
++ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
++
++ return 0;
++}
++
++int mtk_ppe_stop(struct mtk_ppe *ppe)
++{
++ u32 val;
++ int i;
++
++ for (i = 0; i < MTK_PPE_ENTRIES; i++)
++ ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
++ MTK_FOE_STATE_INVALID);
++
++ mtk_ppe_cache_enable(ppe, false);
++
++ /* disable offload engine */
++ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
++ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
++
++ /* disable aging */
++ val = MTK_PPE_TB_CFG_AGE_NON_L4 |
++ MTK_PPE_TB_CFG_AGE_UNBIND |
++ MTK_PPE_TB_CFG_AGE_TCP |
++ MTK_PPE_TB_CFG_AGE_UDP |
++ MTK_PPE_TB_CFG_AGE_TCP_FIN;
++ ppe_clear(ppe, MTK_PPE_TB_CFG, val);
++
++ return mtk_ppe_wait_busy(ppe);
++}
+--- /dev/null
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -0,0 +1,287 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
++
++#ifndef __MTK_PPE_H
++#define __MTK_PPE_H
++
++#include <linux/kernel.h>
++#include <linux/bitfield.h>
++
++#define MTK_ETH_PPE_BASE 0xc00
++
++#define MTK_PPE_ENTRIES_SHIFT 3
++#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
++#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
++
++#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
++#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
++#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24)
++
++#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
++#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15)
++#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16)
++#define MTK_FOE_IB1_BIND_PPPOE BIT(19)
++#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20)
++#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21)
++#define MTK_FOE_IB1_BIND_CACHE BIT(22)
++#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
++#define MTK_FOE_IB1_BIND_TTL BIT(24)
++
++#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25)
++#define MTK_FOE_IB1_STATE GENMASK(29, 28)
++#define MTK_FOE_IB1_UDP BIT(30)
++#define MTK_FOE_IB1_STATIC BIT(31)
++
++enum {
++ MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
++ MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
++ MTK_PPE_PKT_TYPE_BRIDGE = 2,
++ MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3,
++ MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
++ MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
++ MTK_PPE_PKT_TYPE_IPV6_6RD = 7,
++};
++
++#define MTK_FOE_IB2_QID GENMASK(3, 0)
++#define MTK_FOE_IB2_PSE_QOS BIT(4)
++#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
++#define MTK_FOE_IB2_MULTICAST BIT(8)
++
++#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
++#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
++#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
++
++#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
++
++#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
++
++#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
++
++#define MTK_FOE_VLAN2_WHNAT_BSS		GENMASK(5, 0)
++#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
++#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
++
++enum {
++ MTK_FOE_STATE_INVALID,
++ MTK_FOE_STATE_UNBIND,
++ MTK_FOE_STATE_BIND,
++ MTK_FOE_STATE_FIN
++};
++
++struct mtk_foe_mac_info {
++ u16 vlan1;
++ u16 etype;
++
++ u32 dest_mac_hi;
++
++ u16 vlan2;
++ u16 dest_mac_lo;
++
++ u32 src_mac_hi;
++
++ u16 pppoe_id;
++ u16 src_mac_lo;
++};
++
++struct mtk_foe_bridge {
++ u32 dest_mac_hi;
++
++ u16 src_mac_lo;
++ u16 dest_mac_lo;
++
++ u32 src_mac_hi;
++
++ u32 ib2;
++
++ u32 _rsv[5];
++
++ u32 udf_tsid;
++ struct mtk_foe_mac_info l2;
++};
++
++struct mtk_ipv4_tuple {
++ u32 src_ip;
++ u32 dest_ip;
++ union {
++ struct {
++ u16 dest_port;
++ u16 src_port;
++ };
++ struct {
++ u8 protocol;
++ u8 _pad[3]; /* fill with 0xa5a5a5 */
++ };
++ u32 ports;
++ };
++};
++
++struct mtk_foe_ipv4 {
++ struct mtk_ipv4_tuple orig;
++
++ u32 ib2;
++
++ struct mtk_ipv4_tuple new;
++
++ u16 timestamp;
++ u16 _rsv0[3];
++
++ u32 udf_tsid;
++
++ struct mtk_foe_mac_info l2;
++};
++
++struct mtk_foe_ipv4_dslite {
++ struct mtk_ipv4_tuple ip4;
++
++ u32 tunnel_src_ip[4];
++ u32 tunnel_dest_ip[4];
++
++ u8 flow_label[3];
++ u8 priority;
++
++ u32 udf_tsid;
++
++ u32 ib2;
++
++ struct mtk_foe_mac_info l2;
++};
++
++struct mtk_foe_ipv6 {
++ u32 src_ip[4];
++ u32 dest_ip[4];
++
++ union {
++ struct {
++ u8 protocol;
++ u8 _pad[3]; /* fill with 0xa5a5a5 */
++ }; /* 3-tuple */
++ struct {
++ u16 dest_port;
++ u16 src_port;
++ }; /* 5-tuple */
++ u32 ports;
++ };
++
++ u32 _rsv[3];
++
++ u32 udf;
++
++ u32 ib2;
++ struct mtk_foe_mac_info l2;
++};
++
++struct mtk_foe_ipv6_6rd {
++ u32 src_ip[4];
++ u32 dest_ip[4];
++ u16 dest_port;
++ u16 src_port;
++
++ u32 tunnel_src_ip;
++ u32 tunnel_dest_ip;
++
++ u16 hdr_csum;
++ u8 dscp;
++ u8 ttl;
++
++ u8 flag;
++ u8 pad;
++ u8 per_flow_6rd_id;
++ u8 pad2;
++
++ u32 ib2;
++ struct mtk_foe_mac_info l2;
++};
++
++struct mtk_foe_entry {
++ u32 ib1;
++
++ union {
++ struct mtk_foe_bridge bridge;
++ struct mtk_foe_ipv4 ipv4;
++ struct mtk_foe_ipv4_dslite dslite;
++ struct mtk_foe_ipv6 ipv6;
++ struct mtk_foe_ipv6_6rd ipv6_6rd;
++ u32 data[19];
++ };
++};
++
++enum {
++ MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02,
++ MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03,
++ MTK_PPE_CPU_REASON_NO_FLOW = 0x07,
++ MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08,
++ MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09,
++ MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a,
++ MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b,
++ MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c,
++ MTK_PPE_CPU_REASON_UN_HIT = 0x0d,
++ MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e,
++ MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
++ MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10,
++ MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11,
++ MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12,
++ MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13,
++ MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14,
++ MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15,
++ MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16,
++ MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17,
++ MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18,
++ MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19,
++ MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a,
++ MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b,
++ MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c,
++ MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e,
++ MTK_PPE_CPU_REASON_INVALID = 0x1f,
++};
++
++struct mtk_ppe {
++ struct device *dev;
++ void __iomem *base;
++ int version;
++
++ struct mtk_foe_entry *foe_table;
++ dma_addr_t foe_phys;
++
++ void *acct_table;
++};
++
++int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
++ int version);
++int mtk_ppe_start(struct mtk_ppe *ppe);
++int mtk_ppe_stop(struct mtk_ppe *ppe);
++
++static inline void
++mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
++{
++ ppe->foe_table[hash].ib1 = 0;
++ dma_wmb();
++}
++
++static inline int
++mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
++{
++ u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
++
++ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
++ return -1;
++
++ return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
++}
++
++int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
++ u8 pse_port, u8 *src_mac, u8 *dest_mac);
++int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
++int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
++ __be32 src_addr, __be16 src_port,
++ __be32 dest_addr, __be16 dest_port);
++int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
++ __be32 *src_addr, __be16 src_port,
++ __be32 *dest_addr, __be16 dest_port);
++int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
++int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
++int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
++int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
++ u16 timestamp);
++int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
++
++#endif
+--- /dev/null
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+@@ -0,0 +1,217 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
++
++#include <linux/kernel.h>
++#include <linux/debugfs.h>
++#include "mtk_eth_soc.h"
++
++struct mtk_flow_addr_info
++{
++ void *src, *dest;
++ u16 *src_port, *dest_port;
++ bool ipv6;
++};
++
++static const char *mtk_foe_entry_state_str(int state)
++{
++ static const char * const state_str[] = {
++ [MTK_FOE_STATE_INVALID] = "INV",
++ [MTK_FOE_STATE_UNBIND] = "UNB",
++ [MTK_FOE_STATE_BIND] = "BND",
++ [MTK_FOE_STATE_FIN] = "FIN",
++ };
++
++ if (state >= ARRAY_SIZE(state_str) || !state_str[state])
++ return "UNK";
++
++ return state_str[state];
++}
++
++static const char *mtk_foe_pkt_type_str(int type)
++{
++ static const char * const type_str[] = {
++ [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
++ [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
++ [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
++ [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
++ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
++ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
++ [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD",
++ };
++
++ if (type >= ARRAY_SIZE(type_str) || !type_str[type])
++ return "UNKNOWN";
++
++ return type_str[type];
++}
++
++static void
++mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
++{
++ u32 n_addr[4];
++ int i;
++
++ if (!ipv6) {
++ seq_printf(m, "%pI4h", addr);
++ return;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(n_addr); i++)
++ n_addr[i] = htonl(addr[i]);
++ seq_printf(m, "%pI6", n_addr);
++}
++
++static void
++mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai)
++{
++ mtk_print_addr(m, ai->src, ai->ipv6);
++ if (ai->src_port)
++ seq_printf(m, ":%d", *ai->src_port);
++ seq_printf(m, "->");
++ mtk_print_addr(m, ai->dest, ai->ipv6);
++ if (ai->dest_port)
++ seq_printf(m, ":%d", *ai->dest_port);
++}
++
++static int
++mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
++{
++ struct mtk_ppe *ppe = m->private;
++ int i, count;
++
++ for (i = 0, count = 0; i < MTK_PPE_ENTRIES; i++) {
++ struct mtk_foe_entry *entry = &ppe->foe_table[i];
++ struct mtk_foe_mac_info *l2;
++ struct mtk_flow_addr_info ai = {};
++ unsigned char h_source[ETH_ALEN];
++ unsigned char h_dest[ETH_ALEN];
++ int type, state;
++ u32 ib2;
++
++
++ state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1);
++ if (!state)
++ continue;
++
++ if (bind && state != MTK_FOE_STATE_BIND)
++ continue;
++
++ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
++ seq_printf(m, "%05x %s %7s", i,
++ mtk_foe_entry_state_str(state),
++ mtk_foe_pkt_type_str(type));
++
++ switch (type) {
++ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
++ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
++ ai.src_port = &entry->ipv4.orig.src_port;
++ ai.dest_port = &entry->ipv4.orig.dest_port;
++ fallthrough;
++ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
++ ai.src = &entry->ipv4.orig.src_ip;
++ ai.dest = &entry->ipv4.orig.dest_ip;
++ break;
++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
++ ai.src_port = &entry->ipv6.src_port;
++ ai.dest_port = &entry->ipv6.dest_port;
++ fallthrough;
++ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
++ case MTK_PPE_PKT_TYPE_IPV6_6RD:
++ ai.src = &entry->ipv6.src_ip;
++ ai.dest = &entry->ipv6.dest_ip;
++ ai.ipv6 = true;
++ break;
++ }
++
++ seq_printf(m, " orig=");
++ mtk_print_addr_info(m, &ai);
++
++ switch (type) {
++ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
++ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
++ ai.src_port = &entry->ipv4.new.src_port;
++ ai.dest_port = &entry->ipv4.new.dest_port;
++ fallthrough;
++ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
++ ai.src = &entry->ipv4.new.src_ip;
++ ai.dest = &entry->ipv4.new.dest_ip;
++ seq_printf(m, " new=");
++ mtk_print_addr_info(m, &ai);
++ break;
++ }
++
++ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
++ l2 = &entry->ipv6.l2;
++ ib2 = entry->ipv6.ib2;
++ } else {
++ l2 = &entry->ipv4.l2;
++ ib2 = entry->ipv4.ib2;
++ }
++
++ *((__be32 *)h_source) = htonl(l2->src_mac_hi);
++ *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo);
++ *((__be32 *)h_dest) = htonl(l2->dest_mac_hi);
++ *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
++
++ seq_printf(m, " eth=%pM->%pM etype=%04x"
++ " vlan=%d,%d ib1=%08x ib2=%08x\n",
++ h_source, h_dest, ntohs(l2->etype),
++ l2->vlan1, l2->vlan2, entry->ib1, ib2);
++ }
++
++ return 0;
++}
++
++static int
++mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private)
++{
++ return mtk_ppe_debugfs_foe_show(m, private, false);
++}
++
++static int
++mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private)
++{
++ return mtk_ppe_debugfs_foe_show(m, private, true);
++}
++
++static int
++mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file)
++{
++ return single_open(file, mtk_ppe_debugfs_foe_show_all,
++ inode->i_private);
++}
++
++static int
++mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
++{
++ return single_open(file, mtk_ppe_debugfs_foe_show_bind,
++ inode->i_private);
++}
++
++int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
++{
++ static const struct file_operations fops_all = {
++ .open = mtk_ppe_debugfs_foe_open_all,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++ };
++
++ static const struct file_operations fops_bind = {
++ .open = mtk_ppe_debugfs_foe_open_bind,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++ };
++
++ struct dentry *root;
++
++ root = debugfs_create_dir("mtk_ppe", NULL);
++	if (IS_ERR(root))
++		return PTR_ERR(root);
++
++ debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
++ debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+@@ -0,0 +1,144 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
++
++#ifndef __MTK_PPE_REGS_H
++#define __MTK_PPE_REGS_H
++
++#define MTK_PPE_GLO_CFG 0x200
++#define MTK_PPE_GLO_CFG_EN BIT(0)
++#define MTK_PPE_GLO_CFG_TSID_EN BIT(1)
++#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2)
++#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3)
++#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4)
++#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5)
++#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6)
++#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7)
++#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8)
++#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9)
++#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10)
++#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11)
++#define MTK_PPE_GLO_CFG_MCAST_ENTRIES	GENMASK(13, 12)
++#define MTK_PPE_GLO_CFG_BUSY BIT(31)
++
++#define MTK_PPE_FLOW_CFG 0x204
++#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
++#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
++#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
++#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9)
++#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10)
++#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12)
++#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13)
++#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14)
++#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15)
++#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16)
++#define MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17)
++#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18)
++#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19)
++#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20)
++
++#define MTK_PPE_IP_PROTO_CHK 0x208
++#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0)
++#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16)
++
++#define MTK_PPE_TB_CFG 0x21c
++#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0)
++#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3)
++#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4)
++#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6)
++#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7)
++#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8)
++#define MTK_PPE_TB_CFG_AGE_TCP BIT(9)
++#define MTK_PPE_TB_CFG_AGE_UDP BIT(10)
++#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11)
++#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12)
++#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
++#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
++#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
++
++enum {
++ MTK_PPE_SCAN_MODE_DISABLED,
++ MTK_PPE_SCAN_MODE_CHECK_AGE,
++ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE,
++};
++
++enum {
++ MTK_PPE_KEEPALIVE_DISABLE,
++ MTK_PPE_KEEPALIVE_UNICAST_CPU,
++ MTK_PPE_KEEPALIVE_DUP_CPU = 3,
++};
++
++enum {
++ MTK_PPE_SEARCH_MISS_ACTION_DROP,
++ MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2,
++ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3,
++};
++
++#define MTK_PPE_TB_BASE 0x220
++
++#define MTK_PPE_TB_USED 0x224
++#define MTK_PPE_TB_USED_NUM GENMASK(13, 0)
++
++#define MTK_PPE_BIND_RATE 0x228
++#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0)
++#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16)
++
++#define MTK_PPE_BIND_LIMIT0 0x22c
++#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0)
++#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16)
++
++#define MTK_PPE_BIND_LIMIT1 0x230
++#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0)
++#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16)
++
++#define MTK_PPE_KEEPALIVE 0x234
++#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0)
++#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16)
++#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24)
++
++#define MTK_PPE_UNBIND_AGE 0x238
++#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16)
++#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0)
++
++#define MTK_PPE_BIND_AGE0 0x23c
++#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
++#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
++
++#define MTK_PPE_BIND_AGE1 0x240
++#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
++#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
++
++#define MTK_PPE_HASH_SEED 0x244
++
++#define MTK_PPE_DEFAULT_CPU_PORT 0x248
++#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
++
++#define MTK_PPE_MTU_DROP 0x308
++
++#define MTK_PPE_VLAN_MTU0 0x30c
++#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0)
++#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16)
++
++#define MTK_PPE_VLAN_MTU1 0x310
++#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0)
++#define MTK_PPE_VLAN_MTU1_3TAG GENMASK(29, 16)
++
++#define MTK_PPE_VPM_TPID 0x318
++
++#define MTK_PPE_CACHE_CTL 0x320
++#define MTK_PPE_CACHE_CTL_EN BIT(0)
++#define MTK_PPE_CACHE_CTL_LOCK_CLR BIT(4)
++#define MTK_PPE_CACHE_CTL_REQ BIT(8)
++#define MTK_PPE_CACHE_CTL_CLEAR BIT(9)
++#define MTK_PPE_CACHE_CTL_CMD GENMASK(13, 12)
++
++#define MTK_PPE_MIB_CFG 0x334
++#define MTK_PPE_MIB_CFG_EN BIT(0)
++#define MTK_PPE_MIB_CFG_RD_CLR BIT(1)
++
++#define MTK_PPE_MIB_TB_BASE 0x338
++
++#define MTK_PPE_MIB_CACHE_CTL 0x350
++#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
++#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
++
++#endif
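
The multi-bit fields above (SCAN_MODE, HASH_MODE, the age deltas and so on) are declared with GENMASK and are normally programmed through the FIELD_PREP/FIELD_GET helpers. A minimal standalone sketch of that encoding follows; GENMASK/FIELD_PREP/FIELD_GET are reimplemented locally so it builds in userspace (in the kernel they come from <linux/bits.h> and <linux/bitfield.h>), and the scan-mode value is only an illustration:

/* Standalone sketch of how the GENMASK register fields above are
 * typically used; the helpers are local reimplementations. */
#include <stdio.h>

#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, r)  (((r) & (m)) >> __builtin_ctz(m))

#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)

int main(void)
{
	unsigned int val = 0;

	/* program scan mode 2 (MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) */
	val |= FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE, 2u);
	printf("reg=0x%08x scan_mode=%u\n",
	       val, FIELD_GET(MTK_PPE_TB_CFG_SCAN_MODE, val));
	return 0;
}

Single-bit flags such as MTK_PPE_GLO_CFG_EN are simply OR'ed in with their BIT() masks.
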
diff --git a/target/linux/generic/pending-5.10/770-16-net-ethernet-mtk_eth_soc-add-flow-offloading-support.patch b/target/linux/generic/pending-5.10/770-16-net-ethernet-mtk_eth_soc-add-flow-offloading-support.patch
new file mode 100644
index 0000000000..fa9cbcafc8
--- /dev/null
+++ b/target/linux/generic/pending-5.10/770-16-net-ethernet-mtk_eth_soc-add-flow-offloading-support.patch
@@ -0,0 +1,560 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 10 Dec 2020 12:19:18 +0100
+Subject: [PATCH] net: ethernet: mtk_eth_soc: add flow offloading support
+
+This adds support for offloading IPv4 routed flows, including SNAT/DNAT,
+one VLAN, and DSA.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+ create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+
+--- a/drivers/net/ethernet/mediatek/Makefile
++++ b/drivers/net/ethernet/mediatek/Makefile
+@@ -4,5 +4,5 @@
+ #
+
+ obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
+-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o
++mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+ obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -2896,6 +2896,7 @@ static const struct net_device_ops mtk_n
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mtk_poll_controller,
+ #endif
++ .ndo_setup_tc = mtk_eth_setup_tc,
+ };
+
+ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+@@ -3161,6 +3162,10 @@ static int mtk_probe(struct platform_dev
+ eth->base + MTK_ETH_PPE_BASE, 2);
+ if (err)
+ goto err_free_dev;
++
++ err = mtk_eth_offload_init(eth);
++ if (err)
++ goto err_free_dev;
+ }
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -16,6 +16,7 @@
+ #include <linux/refcount.h>
+ #include <linux/phylink.h>
+ #include <linux/dim.h>
++#include <linux/rhashtable.h>
+ #include "mtk_ppe.h"
+
+ #define MTK_QDMA_PAGE_SIZE 2048
+@@ -41,7 +42,8 @@
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_SG | NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+- NETIF_F_IPV6_CSUM)
++ NETIF_F_IPV6_CSUM |\
++ NETIF_F_HW_TC)
+ #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
+ #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+
+@@ -929,6 +931,7 @@ struct mtk_eth {
+ int ip_align;
+
+ struct mtk_ppe ppe;
++ struct rhashtable flow_table;
+ };
+
+ /* struct mtk_mac - the structure that holds the info about the MACs of the
+@@ -973,4 +976,9 @@ int mtk_gmac_sgmii_path_setup(struct mtk
+ int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
+ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
+
++int mtk_eth_offload_init(struct mtk_eth *eth);
++int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
++ void *type_data);
++
++
+ #endif /* MTK_ETH_H */
+--- /dev/null
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -0,0 +1,477 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
++ */
++
++#include <linux/if_ether.h>
++#include <linux/rhashtable.h>
++#include <linux/ip.h>
++#include <net/flow_offload.h>
++#include <net/pkt_cls.h>
++#include <net/dsa.h>
++#include "mtk_eth_soc.h"
++
++struct mtk_flow_data {
++ struct ethhdr eth;
++
++ union {
++ struct {
++ __be32 src_addr;
++ __be32 dst_addr;
++ } v4;
++ };
++
++ __be16 src_port;
++ __be16 dst_port;
++
++ struct {
++ u16 id;
++ __be16 proto;
++ u8 num;
++ } vlan;
++};
++
++struct mtk_flow_entry {
++ struct rhash_head node;
++ unsigned long cookie;
++ u16 hash;
++};
++
++static const struct rhashtable_params mtk_flow_ht_params = {
++ .head_offset = offsetof(struct mtk_flow_entry, node),
++ .key_offset = offsetof(struct mtk_flow_entry, cookie),
++ .key_len = sizeof(unsigned long),
++ .automatic_shrinking = true,
++};
++
++static u32
++mtk_eth_timestamp(struct mtk_eth *eth)
++{
++ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
++}
++
++static int
++mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
++ bool egress)
++{
++ return mtk_foe_entry_set_ipv4_tuple(foe, egress,
++ data->v4.src_addr, data->src_port,
++ data->v4.dst_addr, data->dst_port);
++}
++
++static void
++mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
++{
++ void *dest = eth + act->mangle.offset;
++ const void *src = &act->mangle.val;
++
++ if (act->mangle.offset > 8)
++ return;
++
++ if (act->mangle.mask == 0xffff) {
++ src += 2;
++ dest += 2;
++ }
++
++ memcpy(dest, src, act->mangle.mask ? 2 : 4);
++}
++
++
++static int
++mtk_flow_mangle_ports(const struct flow_action_entry *act,
++ struct mtk_flow_data *data)
++{
++ u32 val = ntohl(act->mangle.val);
++
++ switch (act->mangle.offset) {
++ case 0:
++ if (act->mangle.mask == ~htonl(0xffff))
++ data->dst_port = cpu_to_be16(val);
++ else
++ data->src_port = cpu_to_be16(val >> 16);
++ break;
++ case 2:
++ data->dst_port = cpu_to_be16(val);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int
++mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
++ struct mtk_flow_data *data)
++{
++ __be32 *dest;
++
++ switch (act->mangle.offset) {
++ case offsetof(struct iphdr, saddr):
++ dest = &data->v4.src_addr;
++ break;
++ case offsetof(struct iphdr, daddr):
++ dest = &data->v4.dst_addr;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ memcpy(dest, &act->mangle.val, sizeof(u32));
++
++ return 0;
++}
++
++static int
++mtk_flow_get_dsa_port(struct net_device **dev)
++{
++#if IS_ENABLED(CONFIG_NET_DSA)
++ struct dsa_port *dp;
++
++ dp = dsa_port_from_netdev(*dev);
++ if (IS_ERR(dp))
++ return -ENODEV;
++
++ if (!dp->cpu_dp)
++ return -ENODEV;
++
++ if (!dp->cpu_dp->tag_ops)
++ return -ENODEV;
++
++ if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
++ return -ENODEV;
++
++ *dev = dp->cpu_dp->master;
++
++ return dp->index;
++#else
++ return -ENODEV;
++#endif
++}
++
++static int
++mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
++ struct net_device *dev)
++{
++ int pse_port, dsa_port;
++
++ dsa_port = mtk_flow_get_dsa_port(&dev);
++ if (dsa_port >= 0)
++ mtk_foe_entry_set_dsa(foe, dsa_port);
++
++ if (dev == eth->netdev[0])
++ pse_port = 1;
++ else if (dev == eth->netdev[1])
++ pse_port = 2;
++ else
++ return -EOPNOTSUPP;
++
++ mtk_foe_entry_set_pse_port(foe, pse_port);
++
++ return 0;
++}
++
++static int
++mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
++{
++ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
++ struct flow_action_entry *act;
++ struct mtk_flow_data data = {};
++ struct mtk_foe_entry foe;
++ struct net_device *odev = NULL;
++ struct mtk_flow_entry *entry;
++ int offload_type = 0;
++ u16 addr_type = 0;
++ u32 timestamp;
++ u8 l4proto = 0;
++ int err = 0;
++ int hash;
++ int i;
++
++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
++ struct flow_match_meta match;
++
++ flow_rule_match_meta(rule, &match);
++ } else {
++ return -EOPNOTSUPP;
++ }
++
++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
++ struct flow_match_control match;
++
++ flow_rule_match_control(rule, &match);
++ addr_type = match.key->addr_type;
++ } else {
++ return -EOPNOTSUPP;
++ }
++
++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
++ struct flow_match_basic match;
++
++ flow_rule_match_basic(rule, &match);
++ l4proto = match.key->ip_proto;
++ } else {
++ return -EOPNOTSUPP;
++ }
++
++ flow_action_for_each(i, act, &rule->action) {
++ switch (act->id) {
++ case FLOW_ACTION_MANGLE:
++ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
++ mtk_flow_offload_mangle_eth(act, &data.eth);
++ break;
++ case FLOW_ACTION_REDIRECT:
++ odev = act->dev;
++ break;
++ case FLOW_ACTION_CSUM:
++ break;
++ case FLOW_ACTION_VLAN_PUSH:
++ if (data.vlan.num == 1 ||
++ data.vlan.proto != ETH_P_8021Q)
++ return -EOPNOTSUPP;
++
++ data.vlan.id = act->vlan.vid;
++ data.vlan.proto = act->vlan.proto;
++ data.vlan.num++;
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++ }
++
++ switch (addr_type) {
++ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
++ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ if (!is_valid_ether_addr(data.eth.h_source) ||
++ !is_valid_ether_addr(data.eth.h_dest))
++ return -EINVAL;
++
++ err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
++ data.eth.h_source,
++ data.eth.h_dest);
++ if (err)
++ return err;
++
++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
++ struct flow_match_ports ports;
++
++ flow_rule_match_ports(rule, &ports);
++ data.src_port = ports.key->src;
++ data.dst_port = ports.key->dst;
++ } else {
++ return -EOPNOTSUPP;
++ }
++
++ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
++ struct flow_match_ipv4_addrs addrs;
++
++ flow_rule_match_ipv4_addrs(rule, &addrs);
++
++ data.v4.src_addr = addrs.key->src;
++ data.v4.dst_addr = addrs.key->dst;
++
++ mtk_flow_set_ipv4_addr(&foe, &data, false);
++ }
++
++ flow_action_for_each(i, act, &rule->action) {
++ if (act->id != FLOW_ACTION_MANGLE)
++ continue;
++
++ switch (act->mangle.htype) {
++ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
++ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
++ err = mtk_flow_mangle_ports(act, &data);
++ break;
++ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
++ err = mtk_flow_mangle_ipv4(act, &data);
++ break;
++ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
++ /* handled earlier */
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ if (err)
++ return err;
++ }
++
++ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
++ err = mtk_flow_set_ipv4_addr(&foe, &data, true);
++ if (err)
++ return err;
++ }
++
++ if (data.vlan.num == 1) {
++ if (data.vlan.proto != ETH_P_8021Q)
++ return -EOPNOTSUPP;
++
++ mtk_foe_entry_set_vlan(&foe, data.vlan.id);
++ }
++
++ err = mtk_flow_set_output_device(eth, &foe, odev);
++ if (err)
++ return err;
++
++ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
++ if (!entry)
++ return -ENOMEM;
++
++ entry->cookie = f->cookie;
++ timestamp = mtk_eth_timestamp(eth);
++ hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
++ if (hash < 0) {
++ err = hash;
++ goto free;
++ }
++
++ entry->hash = hash;
++ err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
++ mtk_flow_ht_params);
++ if (err < 0)
++ goto clear_flow;
++
++ return 0;
++clear_flow:
++ mtk_foe_entry_clear(&eth->ppe, hash);
++free:
++ kfree(entry);
++ return err;
++}
++
++static int
++mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
++{
++ struct mtk_flow_entry *entry;
++
++ entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
++ mtk_flow_ht_params);
++ if (!entry)
++ return -ENOENT;
++
++ mtk_foe_entry_clear(&eth->ppe, entry->hash);
++ rhashtable_remove_fast(&eth->flow_table, &entry->node,
++ mtk_flow_ht_params);
++ kfree(entry);
++
++ return 0;
++}
++
++static int
++mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
++{
++ struct mtk_flow_entry *entry;
++ int timestamp;
++ u32 idle;
++
++ entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
++ mtk_flow_ht_params);
++ if (!entry)
++ return -ENOENT;
++
++ timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
++ if (timestamp < 0)
++ return -ETIMEDOUT;
++
++ idle = mtk_eth_timestamp(eth) - timestamp;
++ f->stats.lastused = jiffies - idle * HZ;
++
++ return 0;
++}
++
++static int
++mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
++{
++ struct flow_cls_offload *cls = type_data;
++ struct net_device *dev = cb_priv;
++ struct mtk_mac *mac = netdev_priv(dev);
++ struct mtk_eth *eth = mac->hw;
++
++ if (!tc_can_offload(dev))
++ return -EOPNOTSUPP;
++
++ if (type != TC_SETUP_CLSFLOWER)
++ return -EOPNOTSUPP;
++
++ switch (cls->command) {
++ case FLOW_CLS_REPLACE:
++ return mtk_flow_offload_replace(eth, cls);
++ case FLOW_CLS_DESTROY:
++ return mtk_flow_offload_destroy(eth, cls);
++ case FLOW_CLS_STATS:
++ return mtk_flow_offload_stats(eth, cls);
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int
++mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
++{
++ struct mtk_mac *mac = netdev_priv(dev);
++ struct mtk_eth *eth = mac->hw;
++ static LIST_HEAD(block_cb_list);
++ struct flow_block_cb *block_cb;
++ flow_setup_cb_t *cb;
++
++ if (!eth->ppe.foe_table)
++ return -EOPNOTSUPP;
++
++ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
++ return -EOPNOTSUPP;
++
++ cb = mtk_eth_setup_tc_block_cb;
++ f->driver_block_list = &block_cb_list;
++
++ switch (f->command) {
++ case FLOW_BLOCK_BIND:
++ block_cb = flow_block_cb_lookup(f->block, cb, dev);
++ if (block_cb) {
++ flow_block_cb_incref(block_cb);
++ return 0;
++ }
++ block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
++ if (IS_ERR(block_cb))
++ return PTR_ERR(block_cb);
++
++ flow_block_cb_add(block_cb, f);
++ list_add_tail(&block_cb->driver_list, &block_cb_list);
++ return 0;
++ case FLOW_BLOCK_UNBIND:
++ block_cb = flow_block_cb_lookup(f->block, cb, dev);
++ if (!block_cb)
++ return -ENOENT;
++
++ if (flow_block_cb_decref(block_cb)) {
++ flow_block_cb_remove(block_cb, f);
++ list_del(&block_cb->driver_list);
++ }
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
++ void *type_data)
++{
++ if (type == TC_SETUP_FT)
++ return mtk_eth_setup_tc_block(dev, type_data);
++
++ return -EOPNOTSUPP;
++}
++
++int mtk_eth_offload_init(struct mtk_eth *eth)
++{
++ if (!eth->ppe.foe_table)
++ return 0;
++
++ return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
++}
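
A note on the rhashtable parameters in mtk_ppe_offload.c: head_offset tells the hash table core where the rhash_head linkage lives inside struct mtk_flow_entry, while key_offset/key_len describe the lookup key, the unsigned long flow cookie, which is what makes the rhashtable_lookup(&eth->flow_table, &f->cookie, ...) calls in the destroy and stats paths work. A small userspace sketch of the same offsetof bookkeeping, with simplified stand-in types rather than the kernel structures:

/* Userspace sketch of the offsetof bookkeeping behind
 * mtk_flow_ht_params; stand-in types, not the kernel API. */
#include <stddef.h>
#include <stdio.h>

struct flow_entry {
	void *node;            /* stand-in for struct rhash_head */
	unsigned long cookie;  /* lookup key, as used via f->cookie */
	unsigned short hash;
};

int main(void)
{
	printf("head_offset=%zu key_offset=%zu key_len=%zu\n",
	       offsetof(struct flow_entry, node),
	       offsetof(struct flow_entry, cookie),
	       sizeof(unsigned long));
	return 0;
}

With those three parameters the core can hash and compare entries itself, without any per-driver callbacks.
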
diff --git a/target/linux/generic/pending-5.10/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch b/target/linux/generic/pending-5.10/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch
new file mode 100644
index 0000000000..cb02c71829
--- /dev/null
+++ b/target/linux/generic/pending-5.10/800-bcma-get-SoC-device-struct-copy-its-DMA-params-to-th.patch
@@ -0,0 +1,70 @@
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Subject: [PATCH] bcma: get SoC device struct & copy its DMA params to the
+ subdevices
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For bus devices to be fully usable it's required to set their DMA
+parameters.
+
+For years it has been missing and remained unnoticed because of
+mips_dma_alloc_coherent() silently handling the empty coherent_dma_mask.
+Kernel 4.19 came with a lot of DMA changes and caused a regression on
+the bcm47xx. Starting with the commit f8c55dc6e828 ("MIPS: use generic
+dma noncoherent ops for simple noncoherent platforms") DMA coherent
+allocations just fail. Example:
+[ 1.114914] bgmac_bcma bcma0:2: Allocation of TX ring 0x200 failed
+[ 1.121215] bgmac_bcma bcma0:2: Unable to alloc memory for DMA
+[ 1.127626] bgmac_bcma: probe of bcma0:2 failed with error -12
+[ 1.133838] bgmac_bcma: Broadcom 47xx GBit MAC driver loaded
+
+This change fixes the above regression in addition to the MIPS bcm47xx
+commit 321c46b91550 ("MIPS: BCM47XX: Setup struct device for the SoC").
+
+It also fixes another *old* GPIO regression caused by a parent pointing
+to NULL:
+[ 0.157054] missing gpiochip .dev parent pointer
+[ 0.157287] bcma: bus0: Error registering GPIO driver: -22
+introduced by the commit 74f4e0cc6108 ("bcma: switch GPIO portions to
+use GPIOLIB_IRQCHIP").
+
+Fixes: f8c55dc6e828 ("MIPS: use generic dma noncoherent ops for simple noncoherent platforms")
+Fixes: 74f4e0cc6108 ("bcma: switch GPIO portions to use GPIOLIB_IRQCHIP")
+Cc: linux-mips@linux-mips.org
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+---
+
+--- a/drivers/bcma/host_soc.c
++++ b/drivers/bcma/host_soc.c
+@@ -191,6 +191,8 @@ int __init bcma_host_soc_init(struct bcm
+ struct bcma_bus *bus = &soc->bus;
+ int err;
+
++ bus->dev = soc->dev;
++
+ /* Scan bus and initialize it */
+ err = bcma_bus_early_register(bus);
+ if (err)
+--- a/drivers/bcma/main.c
++++ b/drivers/bcma/main.c
+@@ -236,12 +236,16 @@ EXPORT_SYMBOL(bcma_core_irq);
+
+ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
+ {
++ struct device *dev = &core->dev;
++
+ core->dev.release = bcma_release_core_dev;
+ core->dev.bus = &bcma_bus_type;
+ dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
+ core->dev.parent = bus->dev;
+- if (bus->dev)
++ if (bus->dev) {
+ bcma_of_fill_device(bus->dev, core);
++ dma_coerce_mask_and_coherent(dev, bus->dev->coherent_dma_mask);
++ }
+
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
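
What the two hunks above amount to: the SoC's device structure is attached to the bus, and each core then inherits the bus device's coherent DMA mask via dma_coerce_mask_and_coherent(), so allocations like bgmac's TX ring see a usable mask again. A rough userspace model of why the missing mask made allocations fail; stand-in types, not the kernel DMA API:

/* Rough model of the failure this patch fixes: the DMA core rejects
 * a device whose coherent mask cannot cover the buffer, and bcma
 * cores previously had no mask at all. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	uint64_t coherent_dma_mask;
};

static void *alloc_coherent(struct dev *d, size_t size, uint64_t phys)
{
	if ((phys & d->coherent_dma_mask) != phys)
		return NULL;	/* mask can't address the buffer */
	return malloc(size);
}

int main(void)
{
	struct dev bus  = { .coherent_dma_mask = 0xffffffffu };
	struct dev core = { 0 };	/* mask never set up */
	void *p;

	p = alloc_coherent(&core, 0x200, 0x1000000);
	printf("before: %s\n", p ? "ok" : "allocation fails");
	free(p);

	core.coherent_dma_mask = bus.coherent_dma_mask; /* the fix, in spirit */
	p = alloc_coherent(&core, 0x200, 0x1000000);
	printf("after:  %s\n", p ? "ok" : "allocation fails");
	free(p);
	return 0;
}
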
diff --git a/target/linux/generic/pending-5.10/810-pci_disable_common_quirks.patch b/target/linux/generic/pending-5.10/810-pci_disable_common_quirks.patch
new file mode 100644
index 0000000000..7e7866add8
--- /dev/null
+++ b/target/linux/generic/pending-5.10/810-pci_disable_common_quirks.patch
@@ -0,0 +1,62 @@
+From: Gabor Juhos <juhosg@openwrt.org>
+Subject: debloat: add kernel config option to disable common PCI quirks
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+---
+ drivers/pci/Kconfig  | 7 +++++++
+ drivers/pci/quirks.c | 7 +++++++
+ 2 files changed, 14 insertions(+)
+
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -118,6 +118,13 @@ config XEN_PCIDEV_FRONTEND
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
+
++config PCI_DISABLE_COMMON_QUIRKS
++ bool "PCI disable common quirks"
++ depends on PCI
++ help
++ If you don't know what to do here, say N.
++
++
+ config PCI_ATS
+ bool
+
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -205,6 +205,7 @@ static void quirk_mmio_always_on(struct
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+ /*
+ * The Mellanox Tavor device gives false positive parity errors. Mark this
+ * device with a broken_parity_status to allow PCI scanning code to "skip"
+@@ -3320,6 +3321,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
+
++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
++
+ /*
+ * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.
+ * To work around this, query the size it should be configured to by the
+@@ -3345,6 +3348,8 @@ static void quirk_intel_ntb(struct pci_d
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ /*
+ * Some BIOS implementations leave the Intel GPU interrupts enabled, even
+ * though no one is handling them (e.g., if the i915 driver is never
+@@ -3383,6 +3388,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IN
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
+
++#endif /* !CONFIG_PCI_DISABLE_COMMON_QUIRKS */
++
+ /*
+ * PCI devices which are on Intel chips can skip the 10ms delay
+ * before entering D3 mode.
diff --git a/target/linux/generic/pending-5.10/811-pci_disable_usb_common_quirks.patch b/target/linux/generic/pending-5.10/811-pci_disable_usb_common_quirks.patch
new file mode 100644
index 0000000000..42a8397839
--- /dev/null
+++ b/target/linux/generic/pending-5.10/811-pci_disable_usb_common_quirks.patch
@@ -0,0 +1,115 @@
+From: Felix Fietkau <nbd@nbd.name>
+Subject: debloat: disable common USB quirks
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/usb/host/pci-quirks.c | 16 ++++++++++++++++
+ drivers/usb/host/pci-quirks.h | 20 ++++++++++++++++++++
+ include/linux/usb/hcd.h | 7 +++++++
+ 3 files changed, 43 insertions(+)
+
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -128,6 +128,8 @@ struct amd_chipset_type {
+ u8 rev;
+ };
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ static struct amd_chipset_info {
+ struct pci_dev *nb_dev;
+ struct pci_dev *smbus_dev;
+@@ -633,6 +635,10 @@ bool usb_amd_pt_check_port(struct device
+ }
+ EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
+
++#endif /* CONFIG_PCI_DISABLE_COMMON_QUIRKS */
++
++#if IS_ENABLED(CONFIG_USB_UHCI_HCD)
++
+ /*
+ * Make sure the controller is completely inactive, unable to
+ * generate interrupts or do DMA.
+@@ -712,8 +718,17 @@ reset_needed:
+ uhci_reset_hc(pdev, base);
+ return 1;
+ }
++#else
++int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
++{
++ return 0;
++}
++
++#endif
+ EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
++
+ static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
+ {
+ u16 cmd;
+@@ -1285,3 +1300,4 @@ static void quirk_usb_early_handoff(stru
+ }
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
++#endif
+--- a/drivers/usb/host/pci-quirks.h
++++ b/drivers/usb/host/pci-quirks.h
+@@ -5,6 +5,9 @@
+ #ifdef CONFIG_USB_PCI
+ void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
+ int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
++#endif /* CONFIG_USB_PCI */
++
++#if defined(CONFIG_USB_PCI) && !defined(CONFIG_PCI_DISABLE_COMMON_QUIRKS)
+ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
+ bool usb_amd_hang_symptom_quirk(void);
+ bool usb_amd_prefetch_quirk(void);
+@@ -19,6 +22,18 @@ void sb800_prefetch(struct device *dev,
+ bool usb_amd_pt_check_port(struct device *device, int port);
+ #else
+ struct pci_dev;
++static inline int usb_amd_quirk_pll_check(void)
++{
++ return 0;
++}
++static inline bool usb_amd_hang_symptom_quirk(void)
++{
++ return false;
++}
++static inline bool usb_amd_prefetch_quirk(void)
++{
++ return false;
++}
+ static inline void usb_amd_quirk_pll_disable(void) {}
+ static inline void usb_amd_quirk_pll_enable(void) {}
+ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
+@@ -29,6 +44,11 @@ static inline bool usb_amd_pt_check_port
+ {
+ return false;
+ }
++static inline void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev) {}
++static inline bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
++{
++ return false;
++}
+ #endif /* CONFIG_USB_PCI */
+
+ #endif /* __LINUX_USB_PCI_QUIRKS_H */
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -484,7 +484,14 @@ extern int usb_hcd_pci_probe(struct pci_
+ extern void usb_hcd_pci_remove(struct pci_dev *dev);
+ extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+
++#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
+ extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
++#else
++static inline int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev)
++{
++ return 0;
++}
++#endif
+
+ #ifdef CONFIG_PM
+ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
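
Both debloat patches follow the same recipe: an #ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS guard compiles the quirk code out, and static inline stubs returning benign defaults keep every call site building unchanged. A compile-and-run sketch of that pattern; in a real kernel build the CONFIG_* symbol comes from Kconfig via autoconf.h, and the function name here is illustrative, not a kernel symbol:

/* Guard-plus-stub pattern; emulate the Kconfig symbol with
 *   cc -DCONFIG_PCI_DISABLE_COMMON_QUIRKS demo.c */
#include <stdio.h>
#include <stdbool.h>

#ifndef CONFIG_PCI_DISABLE_COMMON_QUIRKS
static bool quirk_needed(void)
{
	/* the real detection logic would live here */
	return true;
}
#else
/* stub keeps callers building while the quirk code is compiled out */
static inline bool quirk_needed(void)
{
	return false;
}
#endif

int main(void)
{
	printf("quirk %s\n", quirk_needed() ? "applied" : "skipped");
	return 0;
}

Built plainly it prints "quirk applied"; built with -DCONFIG_PCI_DISABLE_COMMON_QUIRKS the stub wins and it prints "quirk skipped".
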
diff --git a/target/linux/generic/pending-5.10/820-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch b/target/linux/generic/pending-5.10/820-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch
new file mode 100644
index 0000000000..33eb34c913
--- /dev/null
+++ b/target/linux/generic/pending-5.10/820-w1-gpio-fix-problem-with-platfom-data-in-w1-gpio.patch
@@ -0,0 +1,26 @@
+From d9c8bc8c1408f3e8529db6e4e04017b4c579c342 Mon Sep 17 00:00:00 2001
+From: Pawel Dembicki <paweldembicki@gmail.com>
+Date: Sun, 18 Feb 2018 17:08:04 +0100
+Subject: [PATCH] w1: gpio: fix problem with platfom data in w1-gpio
+
+On devices where FDT is used, it is impossible to apply platform data
+without a proper FDT node.
+
+This patch allows platform data to be used on such devices as well.
+
+Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
+---
+ drivers/w1/masters/w1-gpio.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/w1/masters/w1-gpio.c
++++ b/drivers/w1/masters/w1-gpio.c
+@@ -76,7 +76,7 @@ static int w1_gpio_probe(struct platform
+ enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+ int err;
+
+- if (of_have_populated_dt()) {
++ if (of_have_populated_dt() && !dev_get_platdata(&pdev->dev)) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
diff --git a/target/linux/generic/pending-5.10/834-ledtrig-libata.patch b/target/linux/generic/pending-5.10/834-ledtrig-libata.patch
new file mode 100644
index 0000000000..623e48085d
--- /dev/null
+++ b/target/linux/generic/pending-5.10/834-ledtrig-libata.patch
@@ -0,0 +1,149 @@
+From: Daniel Golle <daniel@makrotopia.org>
+Subject: libata: add ledtrig support
+
+This adds a LED trigger for each ATA port indicating disk activity.
+
+As this is needed only on specific platforms (NAS SoCs and such),
+these platforms should select ARCH_WANT_LIBATA_LEDS if they have
+boards with LED(s) intended to indicate ATA disk activity and
+need the OS to take care of that.
+That way, if not selected, LED trigger support will not be
+included in libata-core, and both codepaths and structures
+remain untouched.
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/ata/Kconfig | 16 ++++++++++++++++
+ drivers/ata/libata-core.c | 41 +++++++++++++++++++++++++++++++++++++++++
+ include/linux/libata.h | 9 +++++++++
+ 3 files changed, 66 insertions(+)
+
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -67,6 +67,22 @@ config ATA_FORCE
+
+ If unsure, say Y.
+
++config ARCH_WANT_LIBATA_LEDS
++ bool
++
++config ATA_LEDS
++ bool "support ATA port LED triggers"
++ depends on ARCH_WANT_LIBATA_LEDS
++ select NEW_LEDS
++ select LEDS_CLASS
++ select LEDS_TRIGGERS
++ default y
++ help
++ This option adds a LED trigger for each registered ATA port.
++ It is used to drive disk activity LEDs connected via GPIO.
++
++ If unsure, say N.
++
+ config ATA_ACPI
+ bool "ATA ACPI Support"
+ depends on ACPI
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -650,6 +650,19 @@ u64 ata_tf_read_block(const struct ata_t
+ return block;
+ }
+
++#ifdef CONFIG_ATA_LEDS
++#define LIBATA_BLINK_DELAY 20 /* ms */
++static inline void ata_led_act(struct ata_port *ap)
++{
++ unsigned long led_delay = LIBATA_BLINK_DELAY;
++
++ if (unlikely(!ap->ledtrig))
++ return;
++
++ led_trigger_blink_oneshot(ap->ledtrig, &led_delay, &led_delay, 0);
++}
++#endif
++
+ /**
+ * ata_build_rw_tf - Build ATA taskfile for given read/write request
+ * @tf: Target ATA taskfile
+@@ -4513,6 +4526,9 @@ struct ata_queued_cmd *ata_qc_new_init(s
+ if (tag < 0)
+ return NULL;
+ }
++#ifdef CONFIG_ATA_LEDS
++ ata_led_act(ap);
++#endif
+
+ qc = __ata_qc_from_tag(ap, tag);
+ qc->tag = qc->hw_tag = tag;
+@@ -5291,6 +5307,9 @@ struct ata_port *ata_port_alloc(struct a
+ ap->stats.unhandled_irq = 1;
+ ap->stats.idle_irq = 1;
+ #endif
++#ifdef CONFIG_ATA_LEDS
++ ap->ledtrig = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
++#endif
+ ata_sff_port_init(ap);
+
+ return ap;
+@@ -5326,6 +5345,12 @@ static void ata_host_release(struct kref
+
+ kfree(ap->pmp_link);
+ kfree(ap->slave_link);
++#ifdef CONFIG_ATA_LEDS
++ if (ap->ledtrig) {
++ led_trigger_unregister(ap->ledtrig);
++ kfree(ap->ledtrig);
++ }
++#endif
+ kfree(ap);
+ host->ports[i] = NULL;
+ }
+@@ -5732,7 +5757,23 @@ int ata_host_register(struct ata_host *h
+ host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
+ host->ports[i]->local_port_no = i + 1;
+ }
++#ifdef CONFIG_ATA_LEDS
++ for (i = 0; i < host->n_ports; i++) {
++ if (unlikely(!host->ports[i]->ledtrig))
++ continue;
++
++ snprintf(host->ports[i]->ledtrig_name,
++ sizeof(host->ports[i]->ledtrig_name), "ata%u",
++ host->ports[i]->print_id);
+
++ host->ports[i]->ledtrig->name = host->ports[i]->ledtrig_name;
++
++ if (led_trigger_register(host->ports[i]->ledtrig)) {
++ kfree(host->ports[i]->ledtrig);
++ host->ports[i]->ledtrig = NULL;
++ }
++ }
++#endif
+ /* Create associated sysfs transport objects */
+ for (i = 0; i < host->n_ports; i++) {
+ rc = ata_tport_add(host->dev,host->ports[i]);
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -23,6 +23,9 @@
+ #include <linux/cdrom.h>
+ #include <linux/sched.h>
+ #include <linux/async.h>
++#ifdef CONFIG_ATA_LEDS
++#include <linux/leds.h>
++#endif
+
+ /*
+ * Define if arch has non-standard setup. This is a _PCI_ standard
+@@ -882,6 +885,12 @@ struct ata_port {
+ #ifdef CONFIG_ATA_ACPI
+ struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
+ #endif
++
++#ifdef CONFIG_ATA_LEDS
++ struct led_trigger *ledtrig;
++ char ledtrig_name[8];
++#endif
++
+ /* owned by EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
+ };
diff --git a/target/linux/generic/pending-5.10/920-mangle_bootargs.patch b/target/linux/generic/pending-5.10/920-mangle_bootargs.patch
new file mode 100644
index 0000000000..14fdef31fd
--- /dev/null
+++ b/target/linux/generic/pending-5.10/920-mangle_bootargs.patch
@@ -0,0 +1,71 @@
+From: Imre Kaloz <kaloz@openwrt.org>
+Subject: init: add CONFIG_MANGLE_BOOTARGS and disable it by default
+
+Enabling this option renames the bootloader-supplied root= and
+rootfstype= variables, whose values might still have to be known
+but would otherwise break the automatic setup OpenWrt uses.
+
+Signed-off-by: Imre Kaloz <kaloz@openwrt.org>
+---
+ init/Kconfig | 9 +++++++++
+ init/main.c | 24 ++++++++++++++++++++++++
+ 2 files changed, 33 insertions(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1780,6 +1780,15 @@ config EMBEDDED
+ an embedded system so certain expert options are available
+ for configuration.
+
++config MANGLE_BOOTARGS
++ bool "Rename offending bootargs"
++ depends on EXPERT
++ help
++ Sometimes the bootloader passes bogus root= and rootfstype=
++ parameters to the kernel, and while you want to ignore them,
++ you still need to know the values, e.g. to support dual firmware
++ layouts on the flash.
++
+ config HAVE_PERF_EVENTS
+ bool
+ help
+--- a/init/main.c
++++ b/init/main.c
+@@ -607,6 +607,29 @@ static inline void setup_nr_cpu_ids(void
+ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
+ #endif
+
++#ifdef CONFIG_MANGLE_BOOTARGS
++static void __init mangle_bootargs(char *command_line)
++{
++ char *rootdev;
++ char *rootfs;
++
++ rootdev = strstr(command_line, "root=/dev/mtdblock");
++
++ if (rootdev)
++ strncpy(rootdev, "mangled_rootblock=", 18);
++
++ rootfs = strstr(command_line, "rootfstype");
++
++ if (rootfs)
++ strncpy(rootfs, "mangled_fs", 10);
++
++}
++#else
++static void __init mangle_bootargs(char *command_line)
++{
++}
++#endif
++
+ /*
+ * We need to store the untouched command line for future reference.
+ * We also need to store the touched command line since the parameter
+@@ -868,6 +891,7 @@ asmlinkage __visible void __init __no_sa
+ pr_notice("%s", linux_banner);
+ early_security_init();
+ setup_arch(&command_line);
++ mangle_bootargs(command_line);
+ setup_boot_config(command_line);
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
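
The trick in mangle_bootargs() is that both replacements are exactly as long as the strings they overwrite: "root=/dev/mtdblock" and "mangled_rootblock=" are 18 bytes, "rootfstype" and "mangled_fs" are 10. The command line can therefore be rewritten in place without shifting any other parameter. A standalone sketch of that rename (memcpy instead of strncpy, same effect here; the example command line is made up):

/* Standalone sketch of the same-length in-place rename done by
 * mangle_bootargs(). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char cmdline[] =
		"console=ttyS0 root=/dev/mtdblock2 rootfstype=squashfs";
	char *p;

	p = strstr(cmdline, "root=/dev/mtdblock");
	if (p)
		memcpy(p, "mangled_rootblock=", 18);

	p = strstr(cmdline, "rootfstype");
	if (p)
		memcpy(p, "mangled_fs", 10);

	/* prints: console=ttyS0 mangled_rootblock=2 mangled_fs=squashfs */
	printf("%s\n", cmdline);
	return 0;
}

After the rename the kernel no longer treats these as root options, while the original values stay visible for userspace to read.
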